| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64-64) |
|---|---|---|---|---|---|---|---|
# Copyright (c) 2012 - N.P. de Klein
#
# This file is part of Python Mass Spec Analyzer (PyMSA).
#
# Python Mass Spec Analyzer (PyMSA) is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Python Mass Spec Analyzer (PyMSA) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Python Mass Spec Analyzer (PyMSA). If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for the plots module (plots.py)
"""
# author: ndeklein
# date:10/02/2012
# summary: Unit testing functionality of the plots.py script
import sys
import os
# make unittest2 importable from a locally installed site-packages directory
try:
sys.path.append('/homes/ndeklein/python2.6/site-packages')
except:
pass
import pymzml
# some magic to import from pyMS. dirname(dirname(__file__)) gets the directory two levels closer to the root.
# this is done so that pyMS is added to the python path and 'import fileHandling' works
# if this is made into a package, 'from pyMS import fileHandling' should also work
dirname = os.path.dirname
sys.path.append(os.path.join(dirname(dirname(__file__))))
import unittest2 as unittest
import config
import plots
import mzmlFunctions
configHandle = config.ConfigHandle()
config = configHandle.getConfig()
testFolder = os.path.join(os.path.dirname(__file__), config.get('test','testfilefolder'))
testDatabase = os.path.join(os.path.dirname(__file__), config.get('test', 'testdatabase'))
class testPlots(unittest.TestCase):
"""
    A test class for the plots module.
    B{TODO:}
    Write some assertions to make sure the right values get inserted (some do, but some only check that no error is raised when inserting)
"""
#
# def test_massWindow_XIC_plot(self):
# mzmlInstance = pymzml.run.Reader(testFolder+'mzml_testFIle_withBinary.mzML')
# retentionTime, intensity = mzmlFunctions.getIntensityFromMZwindow(mzmlInstance, 0, 2000)
# plots.massWindow_XIC_plot(retentionTime, intensity)
##
# def test_massWindow_XIC_plotException(self):
# mzmlInstance = pymzml.run.Reader(testFolder+'mzml_test_file_1.mzML')
# self.assertRaises(TypeError, {'dummy':'dict'}, 'not a dict')
# self.assertRaises(TypeError, 'not a dict', {'dummy':'dict'})
def test_parent_to_XIC_plot(self):
mzmlInstance = pymzml.run.Reader('/homes/dmamartin/Documents/PROTEOMICS/Cantrell/openMS/JG-C1-1A.mzML')
plots.parent_to_XIC_plot(mzmlInstance, 1597.7015045641685, 4)
# def test_parent_to_XIC_plotException(self):
# mzmlInstance = pymzml.run.Reader(testFolder+'mzml_test_file_1.mzML')
# self.assertRaises(TypeError, plots.massWindow_XIC_plot, 'not a pymzml.run.Reader() instance', 1200, 4)
# self.assertRaises(TypeError, plots.massWindow_XIC_plot, mzmlInstance, 'not an int', 4)
# self.assertRaises(TypeError, plots.massWindow_XIC_plot, mzmlInstance, 1200, 'not an int')
#
def suite():
suite = unittest.TestSuite()
# adding the unit tests to the test suite
suite.addTest(unittest.makeSuite(testPlots))
return suite
unittest.TextTestRunner(verbosity=2).run(suite())
| davidmam/pyMSA | build/lib/pyMSA/test/test_plots.py | Python | gpl-3.0 | 3,576 | ["OpenMS"] | 28802befe385186dacf0bc835acc34845ab641e53dce3221bf47cf483a727912 |
from lettuce import step, world
from survey.features.page_objects.question_module import QuestionModuleList, NewQuestionModule, EditQuestionModulePage
from survey.models import QuestionModule
@step(u'And I have two question modules')
def and_i_have_two_question_modules(step):
world.health_module = QuestionModule.objects.create(name="Health")
world.education_module = QuestionModule.objects.create(name="Education")
@step(u'When I visit the list questions modules page')
def when_i_visit_the_list_questions_modules_page(step):
world.page = QuestionModuleList(browser=world.browser)
world.page.visit()
@step(u'Then I should see the questions modules listed')
def then_i_should_see_the_questions_modules_listed(step):
fields = [world.health_module.name, world.education_module.name, 'Number', 'Module Name', 'Module Lists']
world.page.validate_fields_present(fields)
@step(u'When I visit the create questions module page')
def when_i_visit_the_create_questions_module_page(step):
world.page = NewQuestionModule(world.browser)
world.page.visit()
world.page.validate_url()
@step(u'And I fill in the question module details')
def and_i_fill_in_the_question_module_details(step):
world.page.fill_valid_values({'name': 'Education'})
@step(u'Then I should see that the question module on the listing page')
def then_i_should_see_that_the_question_module_on_the_listing_page(step):
world.page = QuestionModuleList(browser=world.browser)
fields = ['Education', 'Number', 'Module Name', 'Module Lists']
world.page.validate_fields_present(fields)
@step(u'And I click delete module')
def and_i_click_delete_module(step):
world.page.click_by_css("#delete-question-module_%s" % world.health_module.id)
@step(u'I should see a delete module confirmation modal')
def i_should_see_a_confirmation_modal(step):
world.page.see_confirm_modal_message(world.health_module.name)
@step(u'When I confirm delete')
def when_i_confirm_delete(step):
world.page.click_by_css("#delete-module-%s" % world.health_module.id)
@step(u'Then I should see the module was deleted')
def then_i_should_see_the_module_was_deleted(step):
world.page.see_success_message("Module", "deleted")
@step(u'And I click edit module')
def and_i_click_edit_module(step):
world.page.click_by_css("#edit-module_%s" % world.health_module.id)
@step(u'I should see a edit module page')
def i_should_see_a_edit_module_page(step):
world.page = EditQuestionModulePage(world.browser, world.health_module)
world.page.validate_url()
@step(u'When I fill in valid values')
def when_i_fill_in_valid_values(step):
world.page.fill_valid_values({'name': 'Edited Module'})
@step(u'Then I should see the edited question module')
def then_i_should_see_the_edited_question_module(step):
world.page = QuestionModuleList(world.browser)
assert not world.page.browser.find_link_by_text(world.health_module.name)
world.page.is_text_present('Edited Module')
world.page.see_success_message("Question module", "edited")
| antsmc2/mics | survey/features/question_module-steps.py | Python | bsd-3-clause | 3,047 | ["VisIt"] | a301b7fcf77fbeec54967ece16838b243fe11e81b7128df4f1dd63a97a2d5c40 |
"""
Geckoboard decorators.
"""
import base64
import json
from types import ListType, TupleType
try:
from Crypto.Cipher import AES
from Crypto import Random
from hashlib import md5
encryption_enabled = True
except ImportError:
encryption_enabled = False
from functools import wraps
from collections import OrderedDict
from flask import abort
from flask import request
from flask import current_app as app
TEXT_NONE = 0
TEXT_INFO = 2
TEXT_WARN = 1
class WidgetDecorator(object):
"""
Geckoboard widget decorator.
The decorated view must return a data structure suitable for
serialization to XML or JSON for Geckoboard. See the Geckoboard
API docs or the source of extending classes for details.
If the ``GECKOBOARD_API_KEY`` setting is used, the request must
contain the correct API key, or a 403 Forbidden response is
returned.
    If the ``encrypted`` argument is set to True, then the data will be
    encrypted using ``GECKOBOARD_PASSWORD`` (JSON only).
"""
def __new__(cls, *args, **kwargs):
obj = object.__new__(cls)
obj._encrypted = None
if 'encrypted' in kwargs:
if not encryption_enabled:
raise GeckoboardException(
'Use of encryption requires the pycrypto package. ' + \
'This package can be installed manually or by enabling ' + \
'the encryption feature during installation.'
)
obj._encrypted = kwargs.pop('encrypted')
obj._format = None
if 'format' in kwargs:
obj._format = kwargs.pop('format')
obj.data = kwargs
try:
return obj(args[0])
except IndexError:
return obj
def __call__(self, view_func):
@wraps(view_func)
def decorated_view(*args, **kwargs):
if not _is_api_key_correct():
abort(403)
view_result = view_func(*args, **kwargs)
data = self._convert_view_result(view_result)
try:
self.data.update(data)
except ValueError:
self.data = data
content, content_type = _render(self.data, self._encrypted, self._format)
return app.response_class(content, mimetype=content_type)
return decorated_view
def _convert_view_result(self, data):
# Extending classes do view result mangling here.
return data
widget = WidgetDecorator
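# Minimal configuration sketch: these are the Flask config keys this module
# reads (see _is_api_key_correct() and _encrypt() below); ``app`` is your
# Flask application.
#
#     app.config['GECKOBOARD_API_KEY'] = 'my-api-key'      # optional request auth
#     app.config['GECKOBOARD_PASSWORD'] = 'my-passphrase'  # only for encrypted=True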
class NumberWidgetDecorator(WidgetDecorator):
"""
Geckoboard Number widget decorator.
The decorated view must return a tuple `(current, [previous])`, where
`current` is the current value and `previous` is the previous value
    of the measured quantity.
"""
def _convert_view_result(self, result):
if not isinstance(result, (tuple, list)):
result = [result]
result = list(result)
for k, v in enumerate(result):
result[k] = v if isinstance(v, dict) else {'value': v}
return {'item': result}
number_widget = NumberWidgetDecorator
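# Usage sketch (route and view names are illustrative, not part of this
# module): the view returns ``(current, [previous])`` as documented above,
# and the decorator wraps it into Geckoboard's item list.
#
#     @app.route('/widgets/user-count')
#     @number_widget
#     def user_count():
#         return 1234, 1201  # current value, previous value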
class RAGWidgetDecorator(WidgetDecorator):
"""
Geckoboard Red-Amber-Green (RAG) widget decorator.
The decorated view must return a tuple with three tuples `(value,
[text])`. The `value` parameters are the numbers shown in red,
amber and green (in that order). The `text` parameters are optional
and will be displayed next to the respective values in the
dashboard.
"""
def _convert_view_result(self, result):
items = []
for elem in result:
if not isinstance(elem, (tuple, list)):
elem = [elem]
item = OrderedDict()
if elem[0] is None:
item['value'] = ''
else:
item['value'] = elem[0]
if len(elem) > 1:
item['text'] = elem[1]
items.append(item)
return {'item': items}
rag_widget = RAGWidgetDecorator
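# Usage sketch (illustrative names): three ``(value, [text])`` pairs,
# ordered red, amber, green as documented above.
#
#     @app.route('/widgets/tickets')
#     @rag_widget
#     def tickets():
#         return (3, 'overdue'), (12, 'due today'), (54, 'on track')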
class TextWidgetDecorator(WidgetDecorator):
"""
Geckoboard Text widget decorator.
The decorated view must return a list of tuples `(message, [type])`.
The `message` parameters are strings that will be shown in the
widget. The `type` parameters are optional and tell Geckoboard how
to annotate the messages. Use ``TEXT_INFO`` for informational
    messages, ``TEXT_WARN`` for warnings and ``TEXT_NONE`` for plain
text (the default).
"""
def _convert_view_result(self, result):
items = []
if not isinstance(result, (tuple, list)):
result = [result]
for elem in result:
if not isinstance(elem, (tuple, list)):
elem = [elem]
item = OrderedDict()
item['text'] = elem[0]
if len(elem) > 1 and elem[1] is not None:
item['type'] = elem[1]
else:
item['type'] = TEXT_NONE
items.append(item)
return {'item': items}
text_widget = TextWidgetDecorator
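# Usage sketch (illustrative names): a list of ``(message, [type])`` tuples;
# the type defaults to TEXT_NONE when omitted.
#
#     @app.route('/widgets/status')
#     @text_widget
#     def status():
#         return [('All systems nominal', TEXT_INFO),
#                 ('Backlog growing', TEXT_WARN)]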
class PieChartWidgetDecorator(WidgetDecorator):
"""
Geckoboard Pie chart decorator.
The decorated view must return a list of tuples `(value, label,
color)`. The color parameter is a string 'RRGGBB[TT]' representing
red, green, blue and optionally transparency.
"""
def _convert_view_result(self, result):
items = []
for elem in result:
if not isinstance(elem, (tuple, list)):
elem = [elem]
item = OrderedDict()
item['value'] = elem[0]
if len(elem) > 1:
item['label'] = elem[1]
if len(elem) > 2:
item['colour'] = elem[2]
items.append(item)
return {'item': items}
pie_chart = PieChartWidgetDecorator
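# Usage sketch (illustrative names): ``(value, label, color)`` tuples, with
# the color given as 'RRGGBB[TT]' as documented above.
#
#     @app.route('/widgets/browsers')
#     @pie_chart
#     def browsers():
#         return [(60, 'Firefox', 'AA0000'),
#                 (30, 'Chrome', '00AA00'),
#                 (10, 'Other', '0000AA')]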
class LineChartWidgetDecorator(WidgetDecorator):
"""
Geckoboard Line chart decorator.
The decorated view must return a tuple `(values, x_axis, y_axis,
[color])`. The `values` parameter is a list of data points. The
`x-axis` parameter is a label string or a list of strings, that will
be placed on the X-axis. The `y-axis` parameter works similarly for
the Y-axis. If there are more than one axis label, they are placed
evenly along the axis. The optional `color` parameter is a string
``'RRGGBB[TT]'`` representing red, green, blue and optionally
transparency.
"""
def _convert_view_result(self, result):
data = OrderedDict()
data['item'] = list(result[0])
data['settings'] = OrderedDict()
if len(result) > 1:
x_axis = result[1]
if x_axis is None:
x_axis = ''
if not isinstance(x_axis, (tuple, list)):
x_axis = [x_axis]
data['settings']['axisx'] = x_axis
if len(result) > 2:
y_axis = result[2]
if y_axis is None:
y_axis = ''
if not isinstance(y_axis, (tuple, list)):
y_axis = [y_axis]
data['settings']['axisy'] = y_axis
if len(result) > 3:
data['settings']['colour'] = result[3]
return data
line_chart = LineChartWidgetDecorator
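# Usage sketch (illustrative names): ``(values, x_axis, y_axis, [color])``;
# the three x-axis labels below are spread evenly along the axis.
#
#     @app.route('/widgets/signups')
#     @line_chart
#     def signups():
#         return [3, 5, 8, 13], ['Mon', 'Thu', 'Sun'], 'signups', 'AA0000'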
class GeckOMeterWidgetDecorator(WidgetDecorator):
"""
Geckoboard Geck-O-Meter decorator.
The decorated view must return a tuple `(value, min, max)`. The
`value` parameter represents the current value. The `min` and `max`
parameters represent the minimum and maximum value respectively.
They are either a value, or a tuple `(value, text)`. If used, the
`text` parameter will be displayed next to the minimum or maximum
value.
"""
def _convert_view_result(self, result):
value, min, max = result
data = OrderedDict()
data['item'] = value
data['max'] = OrderedDict()
data['min'] = OrderedDict()
if not isinstance(max, (tuple, list)):
max = [max]
data['max']['value'] = max[0]
if len(max) > 1:
data['max']['text'] = max[1]
if not isinstance(min, (tuple, list)):
min = [min]
data['min']['value'] = min[0]
if len(min) > 1:
data['min']['text'] = min[1]
return data
geck_o_meter = GeckOMeterWidgetDecorator
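# Usage sketch (illustrative names): ``(value, min, max)``, where min and
# max may each be a plain value or a ``(value, text)`` tuple.
#
#     @app.route('/widgets/load')
#     @geck_o_meter
#     def load():
#         return 42, 0, (100, 'capacity')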
class FunnelWidgetDecorator(WidgetDecorator):
"""
Geckoboard Funnel decorator.
The decorated view must return a dictionary with at least an `items`
entry: `{'items': [(100, '100 %'), (50, '50 %')]}`.
Optional keys are:
type: 'standard' (default) or 'reverse'. Determines the
order of the colours.
percentage: 'show' (default) or 'hide'. Determines whether or
not the percentage value is shown.
sort: `False` (default) or `True`. Sort the entries by
value or not.
"""
def _convert_view_result(self, result):
data = OrderedDict()
items = result.get('items', [])
# sort the items in order if so desired
if result.get('sort'):
items.sort(reverse=True)
data["item"] = [dict(zip(("value","label"), item)) for item in items]
data["type"] = result.get('type', 'standard')
data["percentage"] = result.get('percentage','show')
return data
funnel = FunnelWidgetDecorator
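# Usage sketch (illustrative names): a dict with an 'items' list and the
# optional 'type', 'percentage' and 'sort' keys described above.
#
#     @app.route('/widgets/conversion')
#     @funnel
#     def conversion():
#         return {'items': [(100, 'visits'), (40, 'signups'), (12, 'purchases')],
#                 'sort': True}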
class BulletWidgetDecorator(WidgetDecorator):
"""
See http://support.geckoboard.com/entries/274940-custom-chart-widget-type-definitions
for more information.
The decorated method must return a dictionary containing these keys:
Required keys:
label: Main label, eg. "Revenue 2011 YTD".
axis_points: Points on the axis, eg. [0, 200, 400, 600, 800, 1000].
current: Current value range, eg. 500 or [100, 500]. A singleton
500 is internally converted to [0, 500].
comparative: Comparative value, eg. 600.
Optional keys:
orientation: One of 'horizontal' or 'vertical'. Defaults to horizontal.
sublabel: Appears below main label.
range: Ordered list of color ranges:
[{'color': 'red', 'start': 0, 'end': 1},
{'color': 'amber', 'start': 1, 'end': 5},
{'color': 'green', 'start': 5, 'end': 10}]
Defaults are calculated from axis_points.
projected: Projected value range, eg. 900 or [100, 900]. A singleton
900 is internally converted to [0, 900].
auto_scale: If true then values will be scaled down if they
do not fit into Geckoboard's UI, eg. a value of 1100
is represented as 1.1. If scaling takes place the sublabel
is suffixed with that information. Default is true.
"""
def _convert_view_result(self, results):
# Check required keys. We do not do type checking since this level of
# competence is assumed.
if not isinstance(results, list):
results = [results]
items = []
for result in results:
for key in ('label', 'axis_points', 'current'):
if not result.has_key(key):
raise RuntimeError, "Key %s is required" % key
# Handle singleton current and projected
current = result['current']
projected = result.get('projected', None)
if not isinstance(current, (ListType, TupleType)):
current = [0, current]
if (projected is not None) and not isinstance(projected, (ListType,
TupleType)):
projected = [0, projected]
# If red, amber and green are not *all* supplied calculate defaults
axis_points = result['axis_points']
_range = result.get('range', [])
if not _range:
if axis_points:
max_point = max(axis_points)
min_point = min(axis_points)
third = (max_point - min_point) / 3
_range.append({'color': 'red',
'start': min_point,
'end': min_point + third - 1})
_range.append({'color': 'amber',
'start': min_point + third,
'end': max_point - third - 1})
                    _range.append({'color': 'green',
                                   'start': max_point - third,
                                   'end': max_point})
else:
_range = [{'color': 'red', 'start': 0, 'end': 0},
{'color': 'amber', 'start': 0, 'end': 0},
{'color': 'green', 'start': 0, 'end': 0}]
# Scan axis points for largest value and scale to avoid overflow in
# Geckoboard's UI.
auto_scale = result.get('auto_scale', True)
if auto_scale and axis_points:
scale_label_map = {1000000000: 'billions', 1000000: 'millions',
1000: 'thousands'}
scale = 1
value = max(axis_points)
for n in (1000000000, 1000000, 1000):
if value >= n:
scale = n
break
# Little fixedpoint helper.
# todo: use a fixedpoint library
def scaler(value, scale):
return float('%.2f' % (value*1.0 / scale))
# Apply scale to all values
if scale > 1:
axis_points = [scaler(v, scale) for v in axis_points]
current = (scaler(current[0], scale), scaler(current[1], scale))
if projected is not None:
projected = (scaler(projected[0], scale),
scaler(projected[1], scale))
                    # apply the scale to the colour ranges as well
                    _range = [dict(r, start=scaler(r['start'], scale),
                                   end=scaler(r['end'], scale))
                              for r in _range]
if 'comparative' in result:
result['comparative'] = scaler(result['comparative'], scale)
# Suffix sublabel
sublabel = result.get('sublabel', '')
if sublabel:
result['sublabel'] = '%s (%s)' % \
(sublabel, scale_label_map[scale])
else:
result['sublabel'] = scale_label_map[scale].capitalize()
# Assemble structure
data = dict(
label=result['label'],
axis=dict(point=axis_points),
range=_range,
measure=dict(current=dict(start=current[0], end=current[1])))
if 'comparative' in result:
data['comparative'] = dict(point=result['comparative'])
# Add optional items
if result.has_key('sublabel'):
data['sublabel'] = result['sublabel']
if projected is not None:
data['measure']['projected'] = dict(start=projected[0],
end=projected[1])
items.append(data)
return dict(item=items,
orientation=result.get('orientation', 'horizontal'),)
bullet = BulletWidgetDecorator
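# Usage sketch (illustrative names and numbers): only some of the keys are
# shown; range, projected and sublabel fall back to the defaults described
# above.
#
#     @app.route('/widgets/revenue')
#     @bullet
#     def revenue():
#         return {'label': 'Revenue 2011 YTD',
#                 'axis_points': [0, 200, 400, 600, 800, 1000],
#                 'current': 500,
#                 'comparative': 600}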
def _is_api_key_correct():
"""Return whether the Geckoboard API key on the request is correct."""
api_key = app.config.get('GECKOBOARD_API_KEY')
if api_key is None:
return True
auth = request.authorization
if auth:
if auth.type == 'basic':
return auth.username == api_key and auth.password == 'X'
return False
def _derive_key_and_iv(password, salt, key_length, iv_length):
d = d_i = ''
while len(d) < key_length + iv_length:
d_i = md5(d_i + password + salt).digest()
d += d_i
return d[:key_length], d[key_length:key_length+iv_length]
def _encrypt(data):
"""Equivalent to OpenSSL using 256 bit AES in CBC mode"""
BS = AES.block_size
pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS)
password = app.config.get('GECKOBOARD_PASSWORD')
salt = Random.new().read(BS - len('Salted__'))
key, iv = _derive_key_and_iv(password, salt, 32, BS)
cipher = AES.new(key, AES.MODE_CBC, iv)
encrypted = 'Salted__' + salt + cipher.encrypt(pad(data))
return base64.b64encode(encrypted)
def _render(data, encrypted, format=None):
"""
Render the data to Geckoboard. If the `format` parameter is passed
to the widget it defines the output format. Otherwise the output
format is based on the `format` request parameter.
    A `format` parameter of ``json`` or ``2`` renders JSON output, any
other value renders XML.
"""
return _render_json(data, encrypted)
def _render_json(data, encrypted=False):
data_json = json.dumps(data)
if encrypted:
data_json = _encrypt(data_json)
return data_json, 'application/json'
class GeckoboardException(Exception):
"""
Represents an error with the Geckoboard decorators.
"""
| rossdeane/flask-geckoboard | flask_geckoboard/decorators.py | Python | mit | 17,376 | ["Amber"] | 14d3be302464854567596b77857df45b67399c335593651163e809597c235a65 |
'''Launches browser to visit given urls (local and remote).
Completes short urls like "blacktrash.org" automagically.
$BROWSER environment may be overridden with option "-b".
'''
# $Id$
import optparse, sys
from muttils import pybrowser, util
proginfo = 'Pybrowser - python interface to system browsers'
def run():
'''Runs the pybrowser script.'''
parser = optparse.OptionParser(usage='%prog [option] [urls]',
description=__doc__,
version=util.fullversion(proginfo))
parser.set_defaults(app=None)
parser.add_option('-b', '--browser', dest='app',
help='prefer browser APP over $BROWSER environment')
options, args = parser.parse_args()
del parser
try:
b = pybrowser.browser(items=args, app=options.app, evalurl=True)
b.urlvisit()
except util.DeadMan, inst:
sys.exit(inst)
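# Example invocation (a sketch; the installed command name may differ):
#
#     pybrowser -b firefox blacktrash.org http://example.com
#
# "blacktrash.org" is completed to a full URL before the browser is launched.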
| cwarden/muttils | muttils/pybrowsercommand.py | Python | gpl-2.0 | 926 | ["VisIt"] | 57b98d40507f93caf9fbd6dc328d97173ebe59c9eba836b0c5fc0e947031b2fe |
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import p4
import sys
from collections import defaultdict
from dependencies import *
import itertools
from analysis_utils import retrieve_from_one_action, reset_state
import logging
"""
This module uses the control flow graph exposed by the HLIR and produces a table
dependency graph with as few dependencies as possible to simplify further
processing. The module includes code to write a .dot file representing the
graph. The graph is represented by a rmt_table_graph object. Nodes in the graph
are instances of rmt_p4_table or rmt_conditional_table (both are subclasses
of rmt_table). Edges of the graph are instances of rmt_table_dependency.
Run rmt_build_table_graph_ingress() to get a rmt_table_graph object representing
the ingress pipeline, and rmt_build_table_graph_egress() for the egress pipeline.
"""
logger = logging.getLogger(__name__)
class Dependency:
# NOP < REVERSE_READ < SUCCESSOR < ACTION < MATCH
NOP = -2
CONTROL_FLOW = -1
REDUNDANT = 0
REVERSE_READ = 1
PREDICATION = 4
SUCCESSOR = 5
ACTION = 6
MATCH = 7
_types = {NOP: "NOP", REVERSE_READ: "REVERSE_READ",
PREDICATION: "PREDICATION", SUCCESSOR: "SUCCESSOR",
ACTION: "ACTION", MATCH: "MATCH"}
@staticmethod
def get(type_):
return Dependency._types[type_]
class rmt_table(object):
def __init__(self, table_name, conditional_barrier = None, p4_table = None):
self.name = table_name
# outgoing edges. Maps a rmt_table to a rmt_table_dependency.
self.next_tables = {}
self.incoming = {}
# to keep track of the initial control flow edges
self.next_tables_control = set()
self.incoming_control = set()
self.p4_table = p4_table
# to figure out the dependencies, we will need the match fields and the
# action fields
self.match_fields = set()
# action fields only for p4 tables ?
self.action_fields = set()
self.action_fields_read = set()
self.action_fields_write = set()
# makes sure that a table cannot escape from its conditional block, used
# to establish SUCCESSOR dependencies
# it is tuple (ancestor table, True | False)
self.conditional_barrier = conditional_barrier
def get_special_fields(self):
return set(), set(), set()
# places all fields of a header instance in field_set
def get_all_subfields(field, field_set):
if isinstance(field, p4.p4_field):
field_set.add(field)
elif isinstance(field, p4.p4_header_instance):
for subfield in field.fields:
get_all_subfields(subfield, field_set)
else:
assert(False)
class rmt_conditional_table(rmt_table):
cnt = 0 # to give a name to the table
def __init__(self, p4_table, conditional_barrier = None):
rmt_conditional_table.cnt += 1
super(rmt_conditional_table, self).__init__(
p4_table.name, conditional_barrier,
p4_table)
self.condition = p4_table.condition
self.match_fields = self.p4_table.retrieve_match_fields()
class rmt_p4_table(rmt_table):
def __init__(self, p4_table, conditional_barrier = None):
super(rmt_p4_table, self).__init__(p4_table.name,
conditional_barrier,
p4_table)
self.min_size = p4_table.min_size
self.max_size = p4_table.max_size
self.match_fields = self.p4_table.retrieve_match_fields()
self._retrieve_action_fields()
r, w, a = self.get_special_fields()
self.action_fields_read.update(r)
self.action_fields_write.update(w)
self.action_fields.update(a)
# not really needed any more
def _retrieve_action_fields(self):
for action in self.p4_table.actions:
r, w, a = retrieve_from_one_action(action)
self.action_fields_read.update(r)
self.action_fields_write.update(w)
self.action_fields.update(a)
def get_action_fields(self):
return self.action_fields_read, self.action_fields_write, self.action_fields
def get_special_fields(self):
r, w, a = set(), set(), set()
for p4_meter in self.p4_table.attached_meters:
if p4_meter.binding and (p4_meter.binding[0] == p4.P4_DIRECT):
w.add(p4_meter.result)
a.add(p4_meter.result)
return r, w, a
class rmt_table_dependency():
def __init__(self, from_, to,
type_ = Dependency.CONTROL_FLOW,
action_set = None):
self.from_ = from_
self.to = to
# type is not resolved when we add the dependency, but later, when
        # rmt_table_graph.resolve_dependencies() is called explicitly
self.type_ = type_
# added to support predication. The action fields are no longer
# associated with a table (parent table, i.e. from), but with the
# dependency (or edge) itself.
self.action_set = action_set
self.action_fields_read = set()
self.action_fields_write = set()
self.action_fields = set()
# special "hit" "miss" case
if action_set and ("hit" in action_set or "miss" in action_set):
(self.action_fields_read,
self.action_fields_write,
self.action_fields) = self.from_.get_action_fields()
elif action_set:
for action in action_set:
r, w, a = retrieve_from_one_action(action)
self.action_fields_read.update(r)
self.action_fields_write.update(w)
self.action_fields.update(a)
r, w, a = self.from_.get_special_fields()
self.action_fields_read.update(r)
self.action_fields_write.update(w)
self.action_fields.update(a)
# fields that induce the dependency
self.fields = {}
# for conditional dependencies
self.cond = None
# def __eq__(self, other):
# return (self.from_ == other.from_ and self.to == other.to)
# def __ne__(self, other):
# return not self.__eq__(other)
def get_p4_dep(self):
if self.type_ == Dependency.MATCH:
return MatchDep(self.from_.p4_table,
self.to.p4_table,
self.fields)
elif self.type_ == Dependency.ACTION:
return ActionDep(self.from_.p4_table,
self.to.p4_table,
self.fields)
elif self.type_ == Dependency.SUCCESSOR:
return SuccessorDep(self.from_.p4_table,
self.to.p4_table,
self.fields,
self.cond)
elif self.type_ == Dependency.PREDICATION:
return SuccessorDep(self.from_.p4_table,
self.to.p4_table,
self.fields,
self.cond)
elif self.type_ == Dependency.REVERSE_READ:
return ReverseReadDep(self.from_.p4_table,
self.to.p4_table,
self.fields)
else:
return None
def is_match_dependency(self):
shared = self.action_fields_write & self.to.match_fields
if shared:
self.fields = shared
return True
return False
def is_action_dependency(self):
# if the field is shared and one action is "a writer"
shared = ( (self.action_fields_write & self.to.action_fields) )
# (self.action_fields & self.to.action_fields_write) )
if shared:
self.fields = shared
return True
return False
# predication and successor are essentially the same (predication in HW),
# but for more clarity we separate the dependencies introduced by
# conditionals in the control flow from the ones introduced by the
# next_table attribute in P4 table specification
def is_predication_dependency(self):
cbs = self.to.conditional_barrier
if not cbs:
return False
for cb in cbs:
if self.from_ == cb[0] and\
type(cb[1]) in {set, str, tuple, p4.p4_action}:
self.cond = cb[1]
return True
return False
def is_successor_dependency(self):
cbs = self.to.conditional_barrier
if not cbs:
return False
for cb in cbs:
if self.from_ == cb[0] and type(cb[1]) is bool:
self.cond = cb[1]
return True
return False
def is_reverse_read_dependency(self):
shared = ( (self.from_.match_fields & self.to.action_fields_write) |
(self.action_fields_read & self.to.action_fields_write) )
if shared:
self.fields = shared
return True
return False
def resolve_type(self, default = Dependency.NOP):
if self.is_match_dependency():
self.type_ = Dependency.MATCH
elif self.is_action_dependency():
self.type_ = Dependency.ACTION
elif self.is_successor_dependency():
self.type_ = Dependency.SUCCESSOR
elif self.is_predication_dependency():
self.type_ = Dependency.PREDICATION
elif self.is_reverse_read_dependency():
self.type_ = Dependency.REVERSE_READ
else:
self.type_ = default
return self.type_
class rmt_table_graph():
def __init__(self, create_ingress = False):
# ingress or egress
self.root = None
self._nodes = {}
# p4 nodes that we have visited (table or conditional). The
# dictionary maps each p4 node to its rmt_table_graph corresponding
# object (rmt_p4_table or rmt_conditional_table)
self._p4_visited = {}
self._validated = False
self._topo_sorting = None
def __contains__(self, table):
if type(table) is p4.p4_table or\
type(table) is p4.p4_conditional_node:
return table in self._p4_visited
print type(table)
assert(False)
def _add_table(self, table_rmt):
self._nodes[table_rmt.name] = table_rmt
if table_rmt.name == "ingress" or table_rmt.name == "egress":
assert(not self.root)
self.root = table_rmt
def field_used(self, field, root, exclude_set = set()):
for next_control_table in root.next_tables_control:
if next_control_table in exclude_set: continue
if field in next_control_table.match_fields or\
field in next_control_table.action_fields:
return True
if self.field_used(field, next_control_table): return True
return False
def resolve_cbs(self):
for t in self._nodes.values():
if not t.conditional_barrier:
continue
if type(t.conditional_barrier) is list:
t.conditional_barrier = [(self._p4_visited[x[0]], x[1]) for x in t.conditional_barrier]
else:
x = t.conditional_barrier
t.conditional_barrier = [(self._p4_visited[x[0]], x[1])]
def add_p4_node(self, p4_node):
assert(p4_node not in self)
cb_p4 = p4_node.conditional_barrier
if cb_p4:
if type(cb_p4[0]) is tuple:
cb = list(cb_p4)
else:
cb = cb_p4
else:
cb = None
if type(p4_node) is p4.p4_table:
table = rmt_p4_table(p4_node, cb)
else:
table = rmt_conditional_table(p4_node, cb)
self._add_table(table)
self._p4_visited[p4_node] = table
return table
# used for ingress and egress tables
def add_dummy_table(self, table_name):
table = rmt_table(table_name)
self._add_table(table)
return table
def get_table(self, p4_node):
assert(p4_node in self)
return self._p4_visited[p4_node]
def add_dependency(self, child, parent, action_set = None):
assert(child.name in self._nodes and parent.name in self._nodes)
dependency = rmt_table_dependency(parent, child,
action_set = action_set)
parent.next_tables[child] = dependency
child.incoming[parent] = dependency
self._validated = False
def topo_sorting(self):
        if not self.root: return True, []  # no root: nothing to sort, treat as invalid
# slightly annoying because the graph is directed, we use a topological
# sorting algo
# see http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
# (second algo)
def visit(cur, sorted_list):
if cur.mark == 1:
return False
if cur.mark != 2:
cur.mark = 1
for dependency in cur.next_tables.values():
next_table = dependency.to
if not visit(next_table, sorted_list):
return False
cur.mark = 2
sorted_list.insert(0, cur)
return True
has_cycle = False
sorted_list = []
for n in self._nodes.values():
# 0 is unmarked, 1 is temp, 2 is permanent
n.mark = 0
for n in self._nodes.values():
if n.mark == 0:
if not visit(n, sorted_list):
has_cycle = True
break
for n in self._nodes.values():
del n.mark
return has_cycle, sorted_list
    # make sure there are no cycles in the graph (must be called before resolving
# dependencies)
def validate(self):
has_cycle, _ = self.topo_sorting()
self._validated = not has_cycle
return self._validated
# Remove redundant edges with a transitive reduction algo in O(n^3), called
# after resolving dependencies
def transitive_reduction(self):
assert( self.validate() )
# for a given table (root_table), find alternate paths to its neighbors
# (root_neighbors). We need max_type_ because we only eliminate an edge
# if there is another path with a highest cost (where cost is given by
# the most expensive dependency along the path)
def transitive_reduction_rec(root_table, cur_table, root_neighbors,
max_type_ = 0, cache = {}):
if cur_table in cache and cache[cur_table] >= max_type_:
return
cache[cur_table] = max_type_
for dependency in cur_table.next_tables.values():
if dependency.type_ <= 0: continue
max_type_tmp = max(max_type_, dependency.type_)
next_table = dependency.to
# should not happen as it would mean a cycle
assert(root_table != cur_table)
if next_table in root_neighbors and\
max_type_tmp >= root_neighbors[next_table]:
root_table.next_tables[next_table].type_ = Dependency.REDUNDANT
next_table.incoming[root_table].type_ = Dependency.REDUNDANT
del root_neighbors[next_table]
transitive_reduction_rec(root_table, next_table, root_neighbors,
max_type_ = max_type_tmp, cache = cache)
# apply the algo to every node in the graph
for table in self._nodes.values():
            # build the list of neighbors, with the associated cost
neighbors = {}
for dependency in table.next_tables.values():
if dependency.type_ > 0:
neighbors[dependency.to] = dependency.type_
for dependency in table.next_tables.values():
if dependency.type_ > 0:
transitive_reduction_rec(table, dependency.to, neighbors,
max_type_ = dependency.type_,
cache = {})
assert( self.validate() )
# called after building the graph to resolve dependencies
def resolve_dependencies(self):
assert( self.validate() )
# We start by resolving the dependencies we have (CONTROL_FLOW) then we
# recursively compute all possible dependencies in the graph (we will
# run a transitive reduction algorithm later to remove redundancies)
for table in self._nodes.values():
for dependency in table.next_tables.values():
next_table = dependency.to
dependency.resolve_type(Dependency.CONTROL_FLOW)
table.next_tables_control.add(next_table)
next_table.incoming_control.add(table)
def resolve_rec(root_table, table, visited, action_set = None):
if table in visited: return
visited.add(table)
new_dependency = rmt_table_dependency(root_table, table,
action_set = action_set)
type_ = new_dependency.resolve_type()
if type_ != Dependency.NOP:
root_table.next_tables[table] = new_dependency
table.incoming[root_table] = new_dependency
for next_table in table.next_tables_control:
resolve_rec(root_table, next_table, visited, action_set)
for table in self._nodes.values():
for dependency in table.next_tables.values():
next_table = dependency.to
visited = set()
resolve_rec(table, next_table, visited, dependency.action_set)
assert( self.validate() )
def generate_dot(self, name = "ingress", out = sys.stdout,
min_dep = Dependency.CONTROL_FLOW,
with_condition_str = True,
debug = False):
styles = {Dependency.CONTROL_FLOW: "style=dotted",
Dependency.REVERSE_READ: "color=yellow",
Dependency.PREDICATION: "color=green",
Dependency.SUCCESSOR: "color=green",
Dependency.ACTION: "color=blue",
Dependency.MATCH: "color=red"}
out.write("digraph " + name + " {\n")
# set conditional tables to be represented as boxes
for table in self._nodes.values():
if isinstance(table, rmt_conditional_table):
if with_condition_str:
label = "\"" + table.name + "\\n" +\
str(table.condition) + "\""
label = "label=" + label
else:
label = table.name
out.write(table.name + " [shape=box " + label + "];\n")
for table in self._nodes.values():
for dependency in table.next_tables.values():
if dependency.type_ < min_dep:
continue
if dependency.type_ == Dependency.REDUNDANT:
continue
if debug:
dep_fields = []
for field in dependency.fields:
dep_fields.append(str(field))
edge_label = "label=\"" + ",\n".join(dep_fields) + "\""
edge_label += " decorate=true"
else:
edge_label = ""
if dependency.type_ == Dependency.SUCCESSOR:
                    if dependency.cond is False:
edge_label += " arrowhead = diamond"
else:
edge_label += " arrowhead = dot"
out.write(table.name + " -> " + dependency.to.name +\
" [" + styles[dependency.type_] +\
" " + edge_label + "]" + ";\n")
out.write("}\n")
def annotate_hlir(self):
for table in self._nodes.values():
for dependency in table.next_tables.values():
dep = dependency.get_p4_dep()
if not dep: continue # control flow...
dep.from_.dependencies_for[dep.to] = dep
dep.to.dependencies_to[dep.from_] = dep
# parses the control flow graph exposed in HLIR
# p4_node can be a p4_table or p4_conditional_node
def parse_p4_table_graph(table_graph, p4_node,
parent = None,
action_set = None):
if not p4_node: return # empty control flow
next_tables = p4_node.next_
visited = p4_node in table_graph
if visited:
table = table_graph.get_table(p4_node)
else:
table = table_graph.add_p4_node(p4_node)
table_graph.add_dependency(table, parent, action_set = action_set)
if visited: return
if(type(p4_node) is p4.p4_conditional_node):
for nt in next_tables.values():
if nt: parse_p4_table_graph(table_graph, nt, table,
action_set = None)
elif(type(p4_node) is p4.p4_table):
table_actions = defaultdict(set)
hit_miss = False
for a in next_tables.keys():
if a in {"hit", "miss"}:
hit_miss = True
break
nt = next_tables[a]
if nt: table_actions[nt].add(a)
if hit_miss:
def_action = None
if p4_node.default_action is not None:
def_action = p4_node.default_action[0]
for hit_or_miss, nt in next_tables.items():
if not nt: continue
if def_action is not None and hit_or_miss == "miss":
parse_p4_table_graph(table_graph, nt, table,
action_set = {def_action})
else:
parse_p4_table_graph(table_graph, nt, table,
action_set = {hit_or_miss})
else:
for nt, a_set in table_actions.items():
parse_p4_table_graph(table_graph, nt, table,
action_set = a_set)
else:
print type(p4_node)
assert(False)
def rmt_build_table_graph(name, entry):
table_graph = rmt_table_graph()
dummy_table = table_graph.add_dummy_table(name)
parse_p4_table_graph(table_graph, entry,
parent = dummy_table)
table_graph.resolve_cbs()
assert( table_graph.validate() )
table_graph.resolve_dependencies()
return table_graph
# returns a rmt_table_graph object for ingress
def rmt_build_table_graph_ingress(hlir):
return rmt_build_table_graph("ingress", hlir.p4_ingress_ptr.keys()[0])
# returns a rmt_table_graph object for egress
def rmt_build_table_graph_egress(hlir):
return rmt_build_table_graph("egress", hlir.p4_egress_ptr)
def rmt_gen_dot_table_graph_ingress(hlir, out):
    table_graph = rmt_build_table_graph_ingress(hlir)
with open(out, 'w') as dotf:
table_graph.generate_dot(out = dotf,
with_condition_str = True,
debug = True)
def rmt_gen_dot_table_graph_egress(hlir, out):
    table_graph = rmt_build_table_graph_egress(hlir)
with open(out, 'w') as dotf:
table_graph.generate_dot(out = dotf,
with_condition_str = True,
debug = True)
def annotate_hlir(hlir):
reset_state(include_valid = True)
for ingress_ptr in hlir.p4_ingress_ptr:
ingress_graph = rmt_build_table_graph_ingress(hlir)
ingress_graph.transitive_reduction()
ingress_graph.annotate_hlir()
if hlir.p4_egress_ptr is not None:
egress_graph = rmt_build_table_graph_egress(hlir)
egress_graph.transitive_reduction()
egress_graph.annotate_hlir()
reset_state(include_valid = False)
| p4lang/p4-hlir | p4_hlir/hlir/table_dependency.py | Python | apache-2.0 | 24,705 | ["VisIt"] | 391f109f51712bd000cb22563c13aae07e864d2809fe91ce0837f8d43bb49f4f |
__author__ = 'sibirrer'
from astrofunc.LensingProfiles.gaussian import Gaussian
from astrofunc.LensingProfiles.gaussian_kappa import GaussianKappa
import numpy as np
import numpy.testing as npt
import pytest
class TestGaussian(object):
"""
tests the Gaussian methods
"""
def setup(self):
self.Gaussian = Gaussian()
def test_function(self):
x = 1
y = 2
amp = 1.*2*np.pi
center_x = 1.
center_y = 1.
sigma_x = 1.
sigma_y = 1.
values = self.Gaussian.function(x, y, amp, center_x, center_y, sigma_x, sigma_y)
assert values == np.exp(-1./2)
x = np.array([2,3,4])
y = np.array([1,1,1])
values = self.Gaussian.function(x, y, amp, center_x, center_y, sigma_x, sigma_y)
assert values[0] == np.exp(-1./2)
assert values[1] == np.exp(-2.**2/2)
assert values[2] == np.exp(-3.**2/2)
def test_derivatives(self):
x = 1
y = 2
amp = 1.*2*np.pi
center_x = 1.
center_y = 1.
sigma_x = 1.
sigma_y = 1.
values = self.Gaussian.derivatives( x, y, amp, center_x, center_y, sigma_x, sigma_y)
assert values[0] == 0.
assert values[1] == -np.exp(-1./2)
x = np.array([2,3,4])
y = np.array([1,1,1])
values = self.Gaussian.derivatives( x, y, amp, center_x, center_y, sigma_x, sigma_y)
assert values[0][0] == -np.exp(-1./2)
assert values[1][0] == 0.
assert values[0][1] == -2*np.exp(-2.**2/2)
assert values[1][1] == 0.
def test_hessian(self):
x = 1
y = 2
amp = 1.*2*np.pi
center_x = 1.
center_y = 1.
sigma_x = 1.
sigma_y = 1.
values = self.Gaussian.hessian( x, y, amp, center_x, center_y, sigma_x, sigma_y)
assert values[0] == -np.exp(-1./2)
assert values[1] == 0.
assert values[2] == 0.
x = np.array([2,3,4])
y = np.array([1,1,1])
values = self.Gaussian.hessian( x, y, amp, center_x, center_y, sigma_x, sigma_y)
assert values[0][0] == 0.
assert values[1][0] == -np.exp(-1./2)
assert values[2][0] == 0.
assert values[0][1] == 0.40600584970983811
assert values[1][1] == -0.1353352832366127
assert values[2][1] == 0.
class TestGaussianKappa(object):
"""
test the Gaussian with Gaussian kappa
"""
def setup(self):
self.gaussian_kappa = GaussianKappa()
self.gaussian = Gaussian()
def test_kappa(self):
x = np.linspace(0, 5, 10)
y = np.linspace(0, 5, 10)
amp = 1.*2*np.pi
center_x = 0.
center_y = 0.
sigma_x = 1.
sigma_y = 1.
f_xx, f_yy, f_xy = self.gaussian_kappa.hessian(x, y, amp, sigma_x, sigma_y, center_x, center_y)
kappa = 1./2 * (f_xx + f_yy)
kappa_true = self.gaussian.function(x, y, amp, sigma_x, sigma_y, center_x, center_y)
print(kappa_true)
print(kappa)
npt.assert_almost_equal(kappa[0], kappa_true[0], decimal=5)
npt.assert_almost_equal(kappa[1], kappa_true[1], decimal=5)
def test_density_2d(self):
x = np.linspace(0, 5, 10)
y = np.linspace(0, 5, 10)
amp = 1.*2*np.pi
center_x = 0.
center_y = 0.
sigma_x = 1.
sigma_y = 1.
f_xx, f_yy, f_xy = self.gaussian_kappa.hessian(x, y, amp, sigma_x, sigma_y, center_x, center_y)
kappa = 1./2 * (f_xx + f_yy)
amp_3d = self.gaussian_kappa._amp2d_to_3d(amp, sigma_x, sigma_y)
density_2d = self.gaussian_kappa.density_2d(x, y, amp_3d, sigma_x, sigma_y, center_x, center_y)
        print(kappa, density_2d)
npt.assert_almost_equal(kappa[1], density_2d[1], decimal=5)
npt.assert_almost_equal(kappa[2], density_2d[2], decimal=5)
def test_3d_2d_convention(self):
x = np.linspace(0, 5, 10)
y = np.linspace(0, 5, 10)
amp = 1.*2*np.pi
center_x = 0.
center_y = 0.
sigma_x = 1.
sigma_y = 1.
amp_3d = self.gaussian_kappa._amp2d_to_3d(amp, sigma_x, sigma_y)
density_2d_gauss = self.gaussian_kappa.density_2d(x, y, amp_3d, sigma_x, sigma_y, center_x, center_y)
density_2d = self.gaussian.function(x, y, amp, sigma_x, sigma_y, center_x, center_y)
        print(density_2d_gauss, density_2d)
npt.assert_almost_equal(density_2d_gauss[1], density_2d[1], decimal=5)
if __name__ == '__main__':
pytest.main()
| sibirrer/astrofunc | test/test_gaussian.py | Python | mit | 4,514 | ["Gaussian"] | 4458baae73a8170b9b8806bd8fd0a3906176d36a668f1336c113284c1d28261b |
# Copyright (C) 2012,2013,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
********************************
tabulated - write tabulated file
********************************
"""
import espressopp
from espressopp import Real3D
def writeTabFile(pot, name, N, low=0.0, high=2.5, body=2):
"""
writeTabFile can be used to create a table for any potential
Parameters are:
* pot : this is any espressopp.interaction potential
* name : filename
    * N : number of lines to write
    * low : lowest r (default is 0.0)
    * high : highest r (default is 2.5)
    * body : number of bodies of the interaction (default is 2)
    This function has not been tested for 3- and 4-body interactions
"""
outfile = open(name, "w")
delta = (high - low) / (N - 1)
for i in range(N):
r = low + i * delta
energy = pot.computeEnergy(r)
if body == 2: # this is for 2-body potentials
force = pot.computeForce(Real3D(r, 0.0, 0.0))[0]
#force /= r
else: # this is for 3- and 4-body potentials
force = pot.computeForce(r)
outfile.write("%15.8g %15.8g %15.8g\n" % (r, energy, force))
outfile.close()
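# Example (a sketch; espressopp.interaction.LennardJones is assumed to take
# these keyword arguments):
#
#     pot = espressopp.interaction.LennardJones(epsilon=1.0, sigma=1.0, cutoff=2.5)
#     writeTabFile(pot, 'lj.tab', N=256, low=0.01, high=2.5)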
| espressopp/espressopp | src/tools/tabulated.py | Python | gpl-3.0 | 1,943 | ["ESPResSo"] | 83231884250a0991025448d7551fa37a0da4f3a8dd61d72509c5ea2d8a881c25 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def hasPathSum(self, root: TreeNode, sum: int) -> bool:
if not root:
return False
def visit(s, n):
if not n:
return False
s -= n.val
            if not (n.left or n.right):
                # leaf node: the path sum matches iff the remaining sum is zero
                return s == 0
            return visit(s, n.left) or visit(s, n.right)
return visit(sum, root)
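# Worked example: for the tree 5 -> (4, 8) and sum = 9, visit(9, root)
# leaves s = 9 - 5 = 4 at the root; the left leaf gives 4 - 4 = 0, so
# hasPathSum returns True (the right leaf would give 4 - 8 = -4).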
| fy0/my-leetcode | 112. Path Sum/main.py | Python | apache-2.0 | 681 | ["VisIt"] | e4a5f9cc6247569e093ee708381737c0c3524cbc3a06134110f89e85d486911d |
import os
import unittest
from __main__ import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import SimpleITK as sitk
import sitkUtils
import math
import numpy
import LabelStatistics
#
# ComputeT2Star
#
class ComputeT2Star(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "ComputeT2Star" # TODO make this more human readable by adding spaces
self.parent.categories = ["IGT"]
self.parent.dependencies = []
self.parent.contributors = ["Junichi Tokuda (Brigham and Women's Hospital)"] # replace with "Firstname Lastname (Organization)"
self.parent.helpText = """
This is an example of scripted loadable module bundled in an extension.
It performs a simple thresholding on the input volume and optionally captures a screenshot.
"""
self.parent.acknowledgementText = """
This module was developed based on a template created by Jean-Christophe Fillion-Robin, Kitware Inc.
and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.
""" # replace with organization, grant and thanks.
#
# ComputeT2StarWidget
#
class ComputeT2StarWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
#--------------------------------------------------
# For debugging
#
# Reload and Test area
reloadCollapsibleButton = ctk.ctkCollapsibleButton()
reloadCollapsibleButton.text = "Reload && Test"
self.layout.addWidget(reloadCollapsibleButton)
reloadFormLayout = qt.QFormLayout(reloadCollapsibleButton)
reloadCollapsibleButton.collapsed = True
# reload button
# (use this during development, but remove it when delivering
# your module to users)
self.reloadButton = qt.QPushButton("Reload")
self.reloadButton.toolTip = "Reload this module."
self.reloadButton.name = "ComputeT2Star Reload"
reloadFormLayout.addWidget(self.reloadButton)
self.reloadButton.connect('clicked()', self.onReload)
#
#--------------------------------------------------
# Instantiate and connect widgets ...
#
# Parameters Area
#
parametersCollapsibleButton = ctk.ctkCollapsibleButton()
parametersCollapsibleButton.text = "Parameters"
self.layout.addWidget(parametersCollapsibleButton)
# Layout within the dummy collapsible button
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
#
# input volume selector
#
self.inputTE1Selector = slicer.qMRMLNodeComboBox()
self.inputTE1Selector.nodeTypes = ( ("vtkMRMLScalarVolumeNode"), "" )
self.inputTE1Selector.selectNodeUponCreation = True
self.inputTE1Selector.addEnabled = True
self.inputTE1Selector.removeEnabled = True
self.inputTE1Selector.noneEnabled = True
self.inputTE1Selector.renameEnabled = True
self.inputTE1Selector.showHidden = False
self.inputTE1Selector.showChildNodeTypes = False
self.inputTE1Selector.setMRMLScene( slicer.mrmlScene )
self.inputTE1Selector.setToolTip( "Pick the first volume" )
parametersFormLayout.addRow("Input Volume 1: ", self.inputTE1Selector)
#
# input volume selector
#
self.inputTE2Selector = slicer.qMRMLNodeComboBox()
self.inputTE2Selector.nodeTypes = ( ("vtkMRMLScalarVolumeNode"), "" )
self.inputTE2Selector.selectNodeUponCreation = True
self.inputTE2Selector.addEnabled = True
self.inputTE2Selector.removeEnabled = True
self.inputTE2Selector.noneEnabled = True
self.inputTE2Selector.renameEnabled = True
self.inputTE2Selector.showHidden = False
self.inputTE2Selector.showChildNodeTypes = False
self.inputTE2Selector.setMRMLScene( slicer.mrmlScene )
self.inputTE2Selector.setToolTip( "Pick the second volume" )
parametersFormLayout.addRow("Input Volume 2: ", self.inputTE2Selector)
#
# reference ROI selector
#
self.referenceROISelector = slicer.qMRMLNodeComboBox()
self.referenceROISelector.nodeTypes = ( ("vtkMRMLLabelMapVolumeNode"), "" )
self.referenceROISelector.selectNodeUponCreation = False
self.referenceROISelector.addEnabled = True
self.referenceROISelector.removeEnabled = True
self.referenceROISelector.noneEnabled = True
self.referenceROISelector.renameEnabled = True
self.referenceROISelector.showHidden = False
self.referenceROISelector.showChildNodeTypes = False
self.referenceROISelector.setMRMLScene( slicer.mrmlScene )
self.referenceROISelector.setToolTip( "Reference ROI for scaling factor and noise estimation" )
parametersFormLayout.addRow("Reference ROI: ", self.referenceROISelector)
#
# outputT2Star volume selector
#
self.outputT2StarSelector = slicer.qMRMLNodeComboBox()
self.outputT2StarSelector.nodeTypes = ( ("vtkMRMLScalarVolumeNode"), "" )
self.outputT2StarSelector.selectNodeUponCreation = True
self.outputT2StarSelector.addEnabled = True
self.outputT2StarSelector.removeEnabled = True
self.outputT2StarSelector.noneEnabled = True
self.outputT2StarSelector.renameEnabled = True
self.outputT2StarSelector.showHidden = False
self.outputT2StarSelector.showChildNodeTypes = False
self.outputT2StarSelector.setMRMLScene( slicer.mrmlScene )
self.outputT2StarSelector.setToolTip( "Pick the T2Star output volume." )
parametersFormLayout.addRow("T2Star Output Volume: ", self.outputT2StarSelector)
#
# outputR2Star volume selector
#
self.outputR2StarSelector = slicer.qMRMLNodeComboBox()
self.outputR2StarSelector.nodeTypes = ( ("vtkMRMLScalarVolumeNode"), "" )
self.outputR2StarSelector.selectNodeUponCreation = True
self.outputR2StarSelector.addEnabled = True
self.outputR2StarSelector.removeEnabled = True
self.outputR2StarSelector.noneEnabled = True
self.outputR2StarSelector.renameEnabled = True
self.outputR2StarSelector.showHidden = False
self.outputR2StarSelector.showChildNodeTypes = False
self.outputR2StarSelector.setMRMLScene( slicer.mrmlScene )
self.outputR2StarSelector.setToolTip( "Pick the R2Star output volume." )
parametersFormLayout.addRow("R2Star Output Volume: ", self.outputR2StarSelector)
#
# First TE
#
self.TE1SpinBox = qt.QDoubleSpinBox()
self.TE1SpinBox.objectName = 'TE1SpinBox'
self.TE1SpinBox.setMaximum(100.0)
self.TE1SpinBox.setMinimum(0.0000)
self.TE1SpinBox.setDecimals(8)
self.TE1SpinBox.setValue(0.00007)
self.TE1SpinBox.setToolTip("TE for Input Volume 1")
parametersFormLayout.addRow("TE1 (s): ", self.TE1SpinBox)
#
# Second TE
#
self.TE2SpinBox = qt.QDoubleSpinBox()
self.TE2SpinBox.objectName = 'TE2SpinBox'
self.TE2SpinBox.setMaximum(100.0)
self.TE2SpinBox.setMinimum(0.0000)
self.TE2SpinBox.setDecimals(8)
self.TE2SpinBox.setValue(0.002)
self.TE2SpinBox.setToolTip("TE for Input Volume 2")
parametersFormLayout.addRow("TE2 (s): ", self.TE2SpinBox)
#
# Scaling Factor
#
self.ScaleSpinBox = qt.QDoubleSpinBox()
self.ScaleSpinBox.objectName = 'ScaleSpinBox'
self.ScaleSpinBox.setMaximum(10.000)
self.ScaleSpinBox.setMinimum(0.000)
self.ScaleSpinBox.setDecimals(8)
self.ScaleSpinBox.setValue(1.000)
self.ScaleSpinBox.setToolTip("Scaling factor to adjust magnitude of volume 2.")
parametersFormLayout.addRow("Scaling Factor: ", self.ScaleSpinBox)
#
# R2* for Scale calibration
#
self.scaleCalibrationR2sSpinBox = qt.QDoubleSpinBox()
self.scaleCalibrationR2sSpinBox.objectName = 'scaleCalibrationR2sSpinBox'
self.scaleCalibrationR2sSpinBox.setMaximum(1000.0)
self.scaleCalibrationR2sSpinBox.setMinimum(0.0)
self.scaleCalibrationR2sSpinBox.setDecimals(8)
self.scaleCalibrationR2sSpinBox.setValue(129.565)
self.scaleCalibrationR2sSpinBox.setToolTip("Scale Calibration")
parametersFormLayout.addRow("Scale Clibration R2* (s^-1): ", self.scaleCalibrationR2sSpinBox)
#
# Echo 1/2 signal lower input threshold
#
self.Echo1InputThresholdSpinBox = qt.QDoubleSpinBox()
self.Echo1InputThresholdSpinBox.objectName = 'Echo1InputThresholdSpinBox'
self.Echo1InputThresholdSpinBox.setMaximum(65536.000)
self.Echo1InputThresholdSpinBox.setMinimum(0.000)
self.Echo1InputThresholdSpinBox.setDecimals(6)
self.Echo1InputThresholdSpinBox.setValue(0.000)
self.Echo1InputThresholdSpinBox.setToolTip("Lower input threshold for echo 1.")
parametersFormLayout.addRow("Lower Input Threshold (Echo 1): ", self.Echo1InputThresholdSpinBox)
self.Echo2InputThresholdSpinBox = qt.QDoubleSpinBox()
self.Echo2InputThresholdSpinBox.objectName = 'Echo2InputThresholdSpinBox'
self.Echo2InputThresholdSpinBox.setMaximum(65536.000)
self.Echo2InputThresholdSpinBox.setMinimum(0.000)
self.Echo2InputThresholdSpinBox.setDecimals(6)
self.Echo2InputThresholdSpinBox.setValue(0.000)
self.Echo2InputThresholdSpinBox.setToolTip("Lower input threshold for echo 2.")
parametersFormLayout.addRow("Lower Input Threshold (Echo 2): ", self.Echo2InputThresholdSpinBox)
self.MinT2sSpinBox = qt.QDoubleSpinBox()
self.MinT2sSpinBox.objectName = 'MinT2sSpinBox'
    self.MinT2sSpinBox.setMaximum(1.000)
self.MinT2sSpinBox.setMinimum(0.000)
self.MinT2sSpinBox.setDecimals(6)
self.MinT2sSpinBox.setValue(0.00125)
self.MinT2sSpinBox.setToolTip("Minimum T2* for output (maximum R2* = 1 / (minimum T2*)).")
parametersFormLayout.addRow("Minimum T2* for output (s): ", self.MinT2sSpinBox)
#
# Check box to correct noise
#
self.useNoiseCorrectionFlagCheckBox = qt.QCheckBox()
self.useNoiseCorrectionFlagCheckBox.checked = 1
self.useNoiseCorrectionFlagCheckBox.setToolTip("If checked, correct noise based on the estimated noise level.")
parametersFormLayout.addRow("Use Noise Correction", self.useNoiseCorrectionFlagCheckBox)
#
# Noise Level
#
self.Echo1NoiseSpinBox = qt.QDoubleSpinBox()
self.Echo1NoiseSpinBox.objectName = 'Echo1NoiseSpinBox'
self.Echo1NoiseSpinBox.setMaximum(500.0)
self.Echo1NoiseSpinBox.setMinimum(0.0)
self.Echo1NoiseSpinBox.setDecimals(6)
self.Echo1NoiseSpinBox.setValue(0.0)
self.Echo1NoiseSpinBox.setToolTip("Noise level for 1st echo noise correction.")
parametersFormLayout.addRow("Noise Level (Echo 1): ", self.Echo1NoiseSpinBox)
self.Echo2NoiseSpinBox = qt.QDoubleSpinBox()
self.Echo2NoiseSpinBox.objectName = 'Echo2NoiseSpinBox'
self.Echo2NoiseSpinBox.setMaximum(500.0)
self.Echo2NoiseSpinBox.setMinimum(0.0)
self.Echo2NoiseSpinBox.setDecimals(6)
self.Echo2NoiseSpinBox.setValue(0.0)
self.Echo2NoiseSpinBox.setToolTip("Noise level for 1st echo noise correction.")
parametersFormLayout.addRow("Noise Level (Echo 2): ", self.Echo2NoiseSpinBox)
#
# check box to use threshold
#
self.useOutputThresholdFlagCheckBox = qt.QCheckBox()
self.useOutputThresholdFlagCheckBox.checked = 1
self.useOutputThresholdFlagCheckBox.setToolTip("If checked, apply the threshold to limit the pixel value ranges.")
parametersFormLayout.addRow("Use OutputThreshold", self.useOutputThresholdFlagCheckBox)
#
# Upper threshold - We set threshold value to limit the range of intensity
#
self.upperOutputThresholdSpinBox = qt.QDoubleSpinBox()
self.upperOutputThresholdSpinBox.objectName = 'upperOutputThresholdSpinBox'
self.upperOutputThresholdSpinBox.setMaximum(1000000.0)
self.upperOutputThresholdSpinBox.setMinimum(-1000000.0)
self.upperOutputThresholdSpinBox.setDecimals(6)
self.upperOutputThresholdSpinBox.setValue(1000000.0)
self.upperOutputThresholdSpinBox.setToolTip("Upper threshold for the output")
parametersFormLayout.addRow("Upper OutputThreshold (s): ", self.upperOutputThresholdSpinBox)
#
# Lower threshold - We set threshold value to limit the range of intensity
#
self.lowerOutputThresholdSpinBox = qt.QDoubleSpinBox()
self.lowerOutputThresholdSpinBox.objectName = 'lowerOutputThresholdSpinBox'
self.lowerOutputThresholdSpinBox.setMaximum(1000000.0)
self.lowerOutputThresholdSpinBox.setMinimum(-1000000.0)
self.lowerOutputThresholdSpinBox.setDecimals(6)
self.lowerOutputThresholdSpinBox.setValue(-1000000.0)
self.lowerOutputThresholdSpinBox.setToolTip("Lower threshold for the output")
parametersFormLayout.addRow("Lower OutputThreshold (s): ", self.lowerOutputThresholdSpinBox)
#
# Apply Button
#
self.applyButton = qt.QPushButton("Apply")
self.applyButton.toolTip = "Run the algorithm."
self.applyButton.enabled = False
parametersFormLayout.addRow(self.applyButton)
# connections
self.applyButton.connect('clicked(bool)', self.onApplyButton)
self.inputTE1Selector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
self.inputTE2Selector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
self.referenceROISelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
self.outputT2StarSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
self.outputR2StarSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
self.useOutputThresholdFlagCheckBox.connect('toggled(bool)', self.onUseOutputThreshold)
self.useNoiseCorrectionFlagCheckBox.connect('toggled(bool)', self.onUseNoiseCorrection)
# Add vertical spacer
self.layout.addStretch(1)
# Refresh Apply button state
self.onSelect()
def cleanup(self):
pass
def onSelect(self):
if self.referenceROISelector.currentNode():
self.ScaleSpinBox.enabled = False
else:
self.ScaleSpinBox.enabled = True
if self.useNoiseCorrectionFlagCheckBox.checked and self.referenceROISelector.currentNode() is None:
self.Echo1NoiseSpinBox.enabled = True
self.Echo2NoiseSpinBox.enabled = True
else:
self.Echo1NoiseSpinBox.enabled = False
self.Echo2NoiseSpinBox.enabled = False
self.applyButton.enabled = self.inputTE1Selector.currentNode() and self.inputTE2Selector.currentNode() and (self.outputT2StarSelector.currentNode() or self.outputR2StarSelector.currentNode())
def onUseOutputThreshold(self):
if self.useOutputThresholdFlagCheckBox.checked:
self.lowerOutputThresholdSpinBox.enabled = True
self.upperOutputThresholdSpinBox.enabled = True
else:
self.lowerOutputThresholdSpinBox.enabled = False
self.upperOutputThresholdSpinBox.enabled = False
def onUseNoiseCorrection(self):
if self.useNoiseCorrectionFlagCheckBox.checked and self.referenceROISelector.currentNode() is None:
self.Echo1NoiseSpinBox.enabled = True
self.Echo2NoiseSpinBox.enabled = True
else:
self.Echo1NoiseSpinBox.enabled = False
self.Echo2NoiseSpinBox.enabled = False
def onApplyButton(self):
logic = ComputeT2StarLogic()
#enableScreenshotsFlag = self.enableScreenshotsFlagCheckBox.checked
#imageOutputThreshold = self.imageOutputThresholdSliderWidget.value
t2name = ''
r2name = ''
if self.outputT2StarSelector.currentNode():
t2name = self.outputT2StarSelector.currentNode().GetName()
if self.outputR2StarSelector.currentNode():
r2name = self.outputR2StarSelector.currentNode().GetName()
inputThreshold = [self.Echo1InputThresholdSpinBox.value, self.Echo2InputThresholdSpinBox.value]
minT2s = self.MinT2sSpinBox.value
outputThreshold = None
if self.useOutputThresholdFlagCheckBox.checked:
outputThreshold = [self.lowerOutputThresholdSpinBox.value, self.upperOutputThresholdSpinBox.value]
scaleFactor = self.ScaleSpinBox.value
# noiseLevel is either computed from the reference ROI below or, when
# noise correction is enabled without an ROI, taken from the spin boxes.
noiseLevel = None
if self.referenceROISelector.currentNode():
inputTE1VolumeNode = self.inputTE1Selector.currentNode()
inputTE2VolumeNode = self.inputTE2Selector.currentNode()
ROINode = self.referenceROISelector.currentNode()
imageTE1 = sitk.Cast(sitkUtils.PullFromSlicer(inputTE1VolumeNode.GetID()), sitk.sitkFloat64)
imageTE2 = sitk.Cast(sitkUtils.PullFromSlicer(inputTE2VolumeNode.GetID()), sitk.sitkFloat64)
roiImage = sitk.Cast(sitkUtils.PullFromSlicer(ROINode.GetID()), sitk.sitkInt8)
noiseEcho1 = logic.CalcNoise(imageTE1, None, roiImage)
noiseEcho2 = logic.CalcNoise(imageTE2, None, roiImage)
noiseLevel = [noiseEcho1, noiseEcho2]
print "noises = [%f, %f]\n" % (noiseEcho1, noiseEcho2)
imageTE1 = logic.CorrectNoise(imageTE1, noiseEcho1)
imageTE2 = logic.CorrectNoise(imageTE2, noiseEcho2)
scaleFactor = logic.CalcScalingFactor(imageTE1, imageTE2, roiImage,
self.TE1SpinBox.value, self.TE2SpinBox.value,
self.scaleCalibrationR2sSpinBox.value)
print "scale = %f\n" % scaleFactor
self.ScaleSpinBox.value = scaleFactor
self.Echo1NoiseSpinBox.value = noiseEcho1
self.Echo2NoiseSpinBox.value = noiseEcho2
else:
if self.useNoiseCorrectionFlagCheckBox.checked:
noiseLevel = [self.Echo1NoiseSpinBox.value, self.Echo2NoiseSpinBox.value]
logic.run(self.inputTE1Selector.currentNode(), self.inputTE2Selector.currentNode(),
self.outputT2StarSelector.currentNode(), self.outputR2StarSelector.currentNode(),
self.TE1SpinBox.value, self.TE2SpinBox.value, scaleFactor,
noiseLevel, outputThreshold, inputThreshold, minT2s)
### Since PushToSlicer() called in logic.run() will delete the original node, obtain the new node and
### reset the selector.
t2Node = slicer.util.getNode(t2name)
r2Node = slicer.util.getNode(r2name)
self.outputT2StarSelector.setCurrentNode(t2Node)
self.outputR2StarSelector.setCurrentNode(r2Node)
def onReload(self, moduleName="ComputeT2Star"):
# Generic reload method for any scripted module.
# ModuleWizard will substitute the correct default moduleName.
globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)
#
# ComputeT2StarLogic
#
class ComputeT2StarLogic(ScriptedLoadableModuleLogic):
def __init__(self):
ScriptedLoadableModuleLogic.__init__(self)
def isValidInputOutputData(self, inputTE1VolumeNode, inputTE2VolumeNode):
"""Validates if the output is not the same as input
"""
if not inputTE1VolumeNode:
logging.debug('isValidInputOutputData failed: no input volume node for TE1 image defined')
return False
if not inputTE2VolumeNode:
logging.debug('isValidInputOutputData failed: no input volume node for TE2 image defined')
return False
return True
def CalcNoise(self, image1, image2, roiImage):
LabelStatistics = sitk.LabelStatisticsImageFilter()
if image2:
#Aimage1 = sitk.Cast(sitkUtils.PullFromSlicer(image1Node.GetID()), sitk.sitkFloat32)
#Aimage2 = sitk.Cast(sitkUtils.PullFromSlicer(image2Node.GetID()), sitk.sitkFloat32)
#roiImage = sitk.Cast(sitkUtils.PullFromSlicer(roiImageNode.GetID()), sitk.sitkInt8)
subImage = sitk.Subtract(image1, image2)
absImage = sitk.Abs(subImage)
LabelStatistics.Execute(absImage, roiImage)
meanAbsDiff = LabelStatistics.GetMean(1)
return (meanAbsDiff/math.sqrt(math.pi/2.0))
else:
LabelStatistics.Execute(image1, roiImage)
SD = LabelStatistics.GetSigma(1)
return SD
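# A rough standalone sketch of the single-image branch above, assuming
# zero-mean Gaussian noise: the noise level is simply the standard
# deviation of the voxels inside the reference ROI. (numpy stands in for
# SimpleITK purely for illustration.)
#
# import numpy
# rng = numpy.random.RandomState(0)
# background = rng.normal(0.0, 5.0, size=100000)  # true sigma = 5.0
# print numpy.std(background)                     # ~5.0, the noise level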
def CorrectNoise(self, image, noiseLevel):
# Rebinding 'image' locally would not propagate to the caller, so the
# noise-corrected image is returned instead.
squareImage = sitk.Pow(image, 2)
subImage = sitk.Subtract(squareImage, noiseLevel*noiseLevel)
subImagePositive = sitk.Threshold(subImage, 0.0, float('Inf'), 0.0)
return sitk.Sqrt(subImagePositive)
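# A minimal numpy sketch of the correction above, assuming the measured
# magnitude signal S carries a noise floor sigma that adds in quadrature,
# i.e. corrected = sqrt(max(S^2 - sigma^2, 0)):
#
# import numpy
# S = numpy.array([10.0, 5.0, 2.0])
# sigma = 3.0
# print numpy.sqrt(numpy.clip(S**2 - sigma**2, 0.0, None))
# # [ 9.539  4.  0. ] -- signals at or below sigma clamp to zero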
def CalcScalingFactor(self, image1, image2, roiImage, TE1, TE2, scaleCalibrationR2s):
#image1 = sitk.Cast(sitkUtils.PullFromSlicer(image1Node.GetID()), sitk.sitkFloat32)
#image2 = sitk.Cast(sitkUtils.PullFromSlicer(image2Node.GetID()), sitk.sitkFloat32)
#roiImage = sitk.Cast(sitkUtils.PullFromSlicer(ROINode.GetID()), sitk.sitkInt8)
LabelStatistics = sitk.LabelStatisticsImageFilter()
LabelStatistics.Execute(image1, roiImage)
echo1 = LabelStatistics.GetMean(1)
LabelStatistics.Execute(image2, roiImage)
echo2 = LabelStatistics.GetMean(1)
scale = echo1 / (echo2 * numpy.exp(scaleCalibrationR2s*(TE2-TE1)))
return (scale)
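# Worked example (illustrative numbers, not from any protocol): with
# TE1 = 0.002 s, TE2 = 0.010 s and scaleCalibrationR2s = 129.565 s^-1,
# the expected decay factor is exp(129.565 * 0.008) ~= 2.819. If the mean
# ROI signals are echo1 = 100.0 and echo2 = 35.47 (i.e. the second echo
# decayed exactly as the calibration R2* predicts), then
# scale = 100.0 / (35.47 * 2.819) ~= 1.0, so no rescaling is applied.
# A second echo brighter than predicted yields scale < 1, and vice versa.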
def run(self, inputTE1VolumeNode, inputTE2VolumeNode, outputT2StarVolumeNode, outputR2StarVolumeNode, TE1, TE2, scaleFactor, noiseLevel, outputThreshold, inputThreshold, minT2s):
"""
Run the actual algorithm
"""
echo1NoiseLevel = 0.0
echo2NoiseLevel = 0.0
upperOutputThreshold = 0.0
lowerOutputThreshold = 0.0
if not self.isValidInputOutputData(inputTE1VolumeNode, inputTE2VolumeNode):
slicer.util.errorDisplay('Missing input volume. Please specify both TE1 and TE2 input volumes.')
return False
logging.info('Processing started')
imageTE1 = sitk.Cast(sitkUtils.PullFromSlicer(inputTE1VolumeNode.GetID()), sitk.sitkFloat64)
imageTE2 = sitk.Cast(sitkUtils.PullFromSlicer(inputTE2VolumeNode.GetID()), sitk.sitkFloat64)
# Noise correction
# Echo 1
if noiseLevel != None:
echo1NoiseLevel = noiseLevel[0]
echo2NoiseLevel = noiseLevel[1]
squareImage1 = sitk.Pow(imageTE1, 2)
subImage1 = sitk.Subtract(squareImage1, echo1NoiseLevel*echo1NoiseLevel)
subImagePositive1 = sitk.Threshold(subImage1,0.0, float('Inf'), 0.0)
imageTE1 = sitk.Sqrt(subImagePositive1)
squareImage2 = sitk.Pow(imageTE2, 2)
subImage2 = sitk.Subtract(squareImage2, echo2NoiseLevel*echo2NoiseLevel)
subImagePositive2 = sitk.Threshold(subImage2,0.0, float('Inf'), 0.0)
imageTE2 = sitk.Sqrt(subImagePositive2)
else:
# Simply remove negative values (not needed?)
imageTE1 = sitk.Threshold(imageTE1,0.0, float('Inf'), 0.0)
imageTE2 = sitk.Threshold(imageTE2,0.0, float('Inf'), 0.0)
## Apply scaling factor to the second echo
imageTE2 = sitk.Multiply(imageTE2, scaleFactor)
## Create mask to exclude invalid pixels
# A pixel is considered invalid when:
# 1. the first or second echo signal is below the input threshold, or
# 2. the second echo signal is greater than the first
mask = None
imaskFillT2s = None
imaskFillR2s = None
if inputThreshold != None:
mask1 = sitk.BinaryThreshold(imageTE1, inputThreshold[0], float('Inf'), 1, 0)
mask2 = sitk.BinaryThreshold(imageTE2, inputThreshold[1], float('Inf'), 1, 0)
#mask3 = sitk.Greater(imageTE2, imageTE1, 1, 0)
mask = sitk.And(mask1, mask2)
#mask = sitk.And(mask, mask3)
imask = sitk.Not(mask)
imaskFloat = sitk.Cast(imask, sitk.sitkFloat64)
imaskFillT2s = imaskFloat * minT2s
imaskFillR2s = 0.0
if minT2s > 0:
imaskFillR2s = imaskFloat * (1/minT2s)
if outputThreshold != None:
lowerOutputThreshold = outputThreshold[0]
upperOutputThreshold = outputThreshold[1]
if outputT2StarVolumeNode:
imageT2Star = sitk.Divide(TE1-TE2, sitk.Log(sitk.Divide(imageTE2, imageTE1)))
if inputThreshold != None:
imageT2Star = sitk.Mask(imageT2Star, mask)
imageT2Star = sitk.Add(imageT2Star, imaskFillT2s)
if outputThreshold != None:
imageT2StarThreshold = sitk.Threshold(imageT2Star, lowerOutputThreshold, upperOutputThreshold, 0.0)
sitkUtils.PushToSlicer(imageT2StarThreshold, outputT2StarVolumeNode.GetName(), 0, True)
else:
sitkUtils.PushToSlicer(imageT2Star, outputT2StarVolumeNode.GetName(), 0, True)
if outputR2StarVolumeNode:
imageR2Star = sitk.Divide(sitk.Log(sitk.Divide(imageTE2, imageTE1)), TE1-TE2)
if inputThreshold != None:
imageR2Star = sitk.Mask(imageR2Star, mask)
imageR2Star = sitk.Add(imageR2Star, imaskFillR2s)
if outputThreshold != None:
imageR2StarThreshold = sitk.Threshold(imageR2Star, lowerOutputThreshold, upperOutputThreshold, 0.0)
sitkUtils.PushToSlicer(imageR2StarThreshold, outputR2StarVolumeNode.GetName(), 0, True)
else:
sitkUtils.PushToSlicer(imageR2Star, outputR2StarVolumeNode.GetName(), 0, True)
logging.info('Processing completed')
return True
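# A compact numpy sketch of the two-echo estimate used in run(), assuming
# a mono-exponential decay S(TE) = S0 * exp(-TE / T2*), which gives
# T2* = (TE1 - TE2) / ln(S2 / S1) and R2* = 1 / T2*:
#
# import numpy
# TE1, TE2 = 0.002, 0.010         # echo times in seconds
# S1, S2 = 100.0, 35.47           # (scaled) signals at TE1 and TE2
# t2star = (TE1 - TE2) / numpy.log(S2 / S1)
# print t2star, 1.0 / t2star      # ~0.00772 s and ~129.6 s^-1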
class ComputeT2StarTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_ComputeT2Star1()
def test_ComputeT2Star1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
pass
#self.delayDisplay("Starting the test")
##
## first, get some data
##
#import urllib
#downloads = (
# ('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd', slicer.util.loadVolume),
# )
#
#for url,name,loader in downloads:
# filePath = slicer.app.temporaryPath + '/' + name
# if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
# logging.info('Requesting download %s from %s...\n' % (name, url))
# urllib.urlretrieve(url, filePath)
# if loader:
# logging.info('Loading %s...' % (name,))
# loader(filePath)
#self.delayDisplay('Finished with download and loading')
#
#volumeNode = slicer.util.getNode(pattern="FA")
#logic = ComputeT2StarLogic()
#self.assertTrue( logic.hasImageData(volumeNode) )
#self.delayDisplay('Test passed!')
|
tokjun/CryoMonitoring
|
ComputeT2Star/ComputeT2Star.py
|
Python
|
bsd-3-clause
| 26,949
|
[
"VTK"
] |
c8a5698dee57734f6c956bf0fc7d7c8b903bd7e74a83f3a1517f6ce29246a330
|
# encoding: utf8
from __future__ import absolute_import, division
from collections import defaultdict, namedtuple
import colorsys
import json
import logging
import mimetypes
import re
import pokedex.db
import pokedex.db.tables as tables
import pkg_resources
from pylons import config, request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from pylons.decorators import jsonify
from sqlalchemy import and_, or_, not_
from sqlalchemy.orm import aliased, contains_eager, eagerload, eagerload_all, join, joinedload, subqueryload, subqueryload_all
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import func
from spline import model
from spline.model import meta
from spline.lib.base import BaseController, render
from spline.lib import helpers as h
from splinext.pokedex import helpers as pokedex_helpers
import splinext.pokedex.db as db
from splinext.pokedex.magnitude import parse_size
log = logging.getLogger(__name__)
def bar_color(hue, pastelness):
"""Returns a color in the form #rrggbb that has the provided hue and
lightness/saturation equal to the given "pastelness".
"""
r, g, b = colorsys.hls_to_rgb(hue, pastelness, pastelness)
return "#%02x%02x%02x" % (r * 256, g * 256, b * 256)
def first(func, iterable):
"""Returns the first element in iterable for which func(elem) is true.
Equivalent to next(ifilter(func, iterable)).
"""
for elem in iterable:
if func(elem):
return elem
def _pokemon_move_method_sort_key((method, _)):
"""Sorts methods by id, except that tutors and machines are bumped to the
bottom, as they tend to be much longer than everything else.
"""
if method.name in (u'Tutor', u'Machine'):
return method.id + 1000
else:
return method.id
def _collapse_pokemon_move_columns(table, thing):
"""Combines adjacent identical columns in a pokemon_move structure.
Arguments are the table structure (defined in comments below) and the
Pokémon or move in question.
Returns a list of column groups, each represented by a list of its columns,
like `[ [ [gs, c] ], [ [rs, e], [fl] ], ... ]`
"""
# What we really need to know is what versions are ultimately collapsed
# into each column. We also need to know how the columns are grouped into
# generations. So we need a list of lists of lists of version groups:
move_columns = []
# Only even consider versions in which this thing actually exists
q = db.pokedex_session.query(tables.Generation) \
.filter(tables.Generation.id >= thing.generation_id) \
.order_by(tables.Generation.id.asc())
for generation in q:
move_columns.append( [] ) # A new column group for this generation
for i, version_group in enumerate(generation.version_groups):
if i == 0:
# Can't collapse these versions anywhere! Create a new column
move_columns[-1].append( [version_group] )
continue
# Test to see if this version group column is identical to the one
# immediately to its left; if so, we can combine them
squashable = True
for method, method_list in table:
# Tutors are special; they will NEVER collapse, so ignore them
# for now. When we actually print the table, we'll concatenate
# all the tutor cells instead of just using the first one like
# with everything else
if method.name == 'Tutor':
continue
for move, version_group_data in method_list:
if version_group_data.get(version_group, None) != \
version_group_data.get(move_columns[-1][-1][-1], None):
break
else:
continue
break # We broke out and didn't get to continue—not squashable
else:
# Stick this version group in the previous column
move_columns[-1][-1].append(version_group)
continue
# Create a new column
move_columns[-1].append( [version_group] )
return move_columns
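# A toy sketch of the collapsing rule, with version groups as plain
# strings and a flat per-group data dict standing in for the real rows:
#
# def collapse(groups, data):
#     columns = []
#     for vg in groups:
#         if columns and data.get(vg) == data.get(columns[-1][-1]):
#             columns[-1].append(vg)  # identical to the column to its left
#         else:
#             columns.append([vg])    # start a new column
#     return columns
#
# collapse(['gs', 'c', 'rs'], {'gs': 1, 'c': 1, 'rs': 2})
# # => [['gs', 'c'], ['rs']]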
def _move_tutor_version_groups(table):
"""Tutored moves are never the same between version groups, so the column
collapsing ignores tutors entirely. This means that we might end up
wanting to show several versions as having a tutor within a single column.
So that "E, FRLG" lines up with "FRLG", there has to be a blank space for
"E", which requires finding all the version groups that contain tutors.
"""
move_tutor_version_groups = set()
for method, method_list in table:
if method.name != 'Tutor':
continue
for move, version_group_data in method_list:
move_tutor_version_groups.update(version_group_data.keys())
return move_tutor_version_groups
def level_range(a, b):
"""If a and b are the same, returns 'L{a}'. Otherwise, returns 'L{a}–{b}'.
"""
if a == b:
return u"L{0}".format(a)
else:
return u"L{0}–{1}".format(a, b)
class CombinedEncounter(object):
"""Represents several encounter rows, collapsed together. Rarities and
level ranges are combined correctly.
Assumed to have the same terrain. Also location and area and so forth, but
those aren't actually needed.
"""
def __init__(self, encounter=None):
self.terrain = None
self.rarity = 0
self.min_level = 0
self.max_level = 0
if encounter:
self.combine_with(encounter)
def combine_with(self, encounter):
if self.terrain and self.terrain != encounter.slot.terrain:
raise ValueError(
"Can't combine terrain {0} with {1}"
.format(self.terrain.name, encounter.slot.terrain.name)
)
self.rarity += encounter.slot.rarity
self.max_level = max(self.max_level, encounter.max_level)
if not self.min_level:
self.min_level = encounter.min_level
else:
self.min_level = min(self.min_level, encounter.min_level)
@property
def level(self):
return level_range(self.min_level, self.max_level)
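# Rough usage sketch (encounter objects faked for illustration): combining
# a slot of "level 3-4, rarity 20" with one of "level 2-4, rarity 30" in
# the same terrain yields rarity 50, min_level 2, max_level 4, and a
# .level property of u"L2–4".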
class PokedexController(BaseController):
# Used by lookup disambig pages
table_labels = {
tables.Ability: 'ability',
tables.Item: 'item',
tables.Location: 'location',
tables.Move: 'move',
tables.Nature: 'nature',
tables.Pokemon: u'Pokémon',
tables.Type: 'type',
}
# Dict of terrain name => icon path
encounter_terrain_icons = {
'Surfing': 'surfing.png',
'Fishing with an Old Rod': 'old-rod.png',
'Fishing with a Good Rod': 'good-rod.png',
'Fishing with a Super Rod': 'super-rod.png',
'Walking in tall grass or a cave': 'grass.png',
'Smashing rocks': 'rock-smash.png',
}
# Maps condition value names to representative icons
encounter_condition_value_icons = {
# Conditions
'Not during a swarm': 'swarm-no.png',
'During a swarm': 'swarm-yes.png',
'No fishing swarm': 'swarm-no.png',
'Fishing swarm': 'swarm-yes.png',
'No surfing swarm': 'swarm-no.png',
'Surfing swarm': 'swarm-yes.png',
'In the morning': 'time-morning.png',
'During the day': 'time-daytime.png',
'At night': 'time-night.png',
u'Not using PokéRadar': 'pokéradar-off.png',
u'Using PokéRadar': 'pokéradar-on.png',
'No game in slot 2': 'slot2-none.png',
'Ruby in slot 2': 'slot2-ruby.png',
'Sapphire in slot 2': 'slot2-sapphire.png',
'Emerald in slot 2': 'slot2-emerald.png',
'FireRed in slot 2': 'slot2-firered.png',
'LeafGreen in slot 2': 'slot2-leafgreen.png',
'Radio off': 'radio-off.png',
'Hoenn radio': 'radio-hoenn.png',
'Sinnoh radio': 'radio-sinnoh.png',
}
def __before__(self, action, **params):
super(PokedexController, self).__before__(action, **params)
c.javascripts.append(('pokedex', 'pokedex'))
def __call__(self, *args, **params):
"""Run the controller, making sure to discard the Pokédex session when
we're done.
This is largely copied from the default Pylons lib.base.__call__.
"""
try:
return super(PokedexController, self).__call__(*args, **params)
finally:
db.pokedex_session.remove()
def index(self):
return ''
def media(self, path):
(mimetype, whatever) = mimetypes.guess_type(path)
response.headers['content-type'] = mimetype or 'application/octet-stream'
pkg_path = "data/media/%s" % path
return pkg_resources.resource_string('pokedex', pkg_path)
def lookup(self):
"""Find a page in the Pokédex given a name.
Also performs fuzzy search.
"""
name = request.params.get('lookup', None)
if not name:
# Nothing entered. What? Where did you come from?
# There's nothing sensible to do here. Let's use an obscure status
# code, like 204 No Content.
abort(204)
name = name.strip()
### Special stuff that bypasses lookup
if name.lower() == 'obdurate':
# Pokémon flavor text in the D/P font
return self._egg_unlock_cheat('obdurate')
### Regular lookup
valid_types = []
c.subpage = None
# Subpage suffixes: 'flavor' and 'locations' for Pokémon bits
if name.lower().endswith(u' flavor'):
c.subpage = 'flavor'
valid_types = [u'pokemon']
name = re.sub('(?i) flavor$', '', name)
elif name.lower().endswith(u' locations'):
c.subpage = 'locations'
valid_types = [u'pokemon']
name = re.sub('(?i) locations$', '', name)
results = db.pokedex_lookup.lookup(name, valid_types=valid_types)
if len(results) == 0:
# Nothing found
# XXX real error page
return self._not_found()
elif len(results) == 1:
# Only one possibility! Hooray!
if not results[0].exact:
# Wasn't an exact match, but we can only figure out one thing
# the user might have meant, so redirect to it anyway
h.flash(u"""Nothing in the Pokédex is exactly called "{0}". """
u"""This is the only close match.""".format(name),
icon='spell-check-error')
return redirect(pokedex_helpers.make_thingy_url(
results[0].object, subpage=c.subpage))
else:
# Multiple matches. Could be exact (e.g., Metronome) or a fuzzy
# match. Result page looks about the same either way
c.input = name
c.exact = results[0].exact
c.results = results
c.table_labels = self.table_labels
return render('/pokedex/lookup_results.mako')
def _not_found(self):
# XXX make this do fuzzy search or whatever
abort(404)
def _egg_unlock_cheat(self, cheat):
"""Easter egg that writes Pokédex data in the Pokémon font."""
cheat_key = "cheat_%s" % cheat
session[cheat_key] = not session.get(cheat_key, False)
session.save()
c.this_cheat_key = cheat_key
return render('/pokedex/cheat_unlocked.mako')
def suggest(self):
"""Returns a JSON array of Pokédex lookup suggestions, compatible with
the OpenSearch spec.
"""
prefix = request.params.get('prefix', None)
if not prefix:
return '[]'
valid_types = request.params.getall('type')
suggestions = db.pokedex_lookup.prefix_lookup(
prefix,
valid_types=valid_types,
)
names = [] # actual terms that will appear in the list
metadata = [] # parallel array of metadata my suggest widget uses
for suggestion in suggestions:
row = suggestion.object
names.append(suggestion.name)
meta = dict(
type=row.__singlename__,
indexed_name=suggestion.indexed_name,
)
# Get an accompanying image. Moves get their type; abilities get
# nothing; everything else gets the obvious corresponding icon
image = None
if isinstance(row, tables.Pokemon):
if row.forme_name:
image = u"icons/{0}-{1}.png".format(row.national_id, row.forme_name)
else:
image = u"icons/{0}.png".format(row.national_id)
elif isinstance(row, tables.Move):
image = u"chrome/types/{0}.png".format(row.type.name)
elif isinstance(row, tables.Type):
image = u"chrome/types/{0}.png".format(row.name)
elif isinstance(row, tables.Item):
image = u"items/{0}.png".format(
pokedex_helpers.filename_from_name(row.name))
if image:
meta['image'] = url(controller='dex', action='media',
path=image,
qualified=True)
# Give a country icon so JavaScript doesn't have to hardcode Spline
# paths. Don't *think* we need to give the long language name...
meta['language'] = suggestion.iso3166
meta['language_icon'] = h.static_uri(
'spline',
'flags/{0}.png'.format(suggestion.iso3166),
qualified=True
)
metadata.append(meta)
normalized_name = db.pokedex_lookup.normalize_name(prefix)
if ':' in normalized_name:
_, normalized_name = normalized_name.split(':', 1)
data = [
prefix,
names,
None, # descriptions
None, # query URLs
metadata, # my metadata; outside the spec's range
normalized_name, # the key we actually looked for
]
### Format as JSON. Also sets the content-type and supports JSONP --
### if there's a 'callback' param, the return value will be wrapped
### appropriately.
json_data = json.dumps(data)
if 'callback' in request.params:
# Pad and change the content-type to match a script tag
json_data = "{callback}({json})".format(
callback=request.params['callback'],
json=json_data,
)
response.headers['Content-Type'] = 'text/javascript; charset=UTF-8'
else:
# Just set content type
response.headers['Content-Type'] = 'application/json; charset=UTF-8'
return json_data
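# Example response shape (names invented): a request with
# prefix=pika&callback=cb would return, as text/javascript,
#   cb(["pika", ["Pikachu"], null, null, [{...}], "pika"])
# while the same request without `callback` returns the bare JSON array
# as application/json.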
def _prev_next_pokemon(self, pokemon):
"""Returns a 2-tuple of the previous and next Pokémon."""
max_id = db.pokedex_session.query(tables.Pokemon) \
.filter_by(forme_base_pokemon_id=None) \
.count()
prev_pokemon = db.pokedex_session.query(tables.Pokemon).get(
(c.pokemon.national_id - 1 - 1) % max_id + 1)
next_pokemon = db.pokedex_session.query(tables.Pokemon).get(
(c.pokemon.national_id - 1 + 1) % max_id + 1)
return prev_pokemon, next_pokemon
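# The arithmetic wraps around the dex; e.g. with max_id = 493,
# national_id 1 gets previous (1 - 1 - 1) % 493 + 1 = 493 and next
# (1 - 1 + 1) % 493 + 1 = 2, so the first Pokémon's "previous" entry is
# the last one.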
@jsonify
def parse_size(self):
u"""Parses a height or weight and returns a bare number in Pokémon
units.
Query params are `size`, the string, and `mode`, either 'height' or
'weight'.
"""
size = request.params.get('size', None)
mode = request.params.get('mode', None)
if not size or mode not in (u'height', u'weight'):
# Totally bogus!
abort(400)
try:
return parse_size(size, mode)
except (IndexError, ValueError):
abort(400)
def pokemon_list(self):
return render('/pokedex/pokemon_list.mako')
def pokemon(self, name=None):
form = request.params.get('form', None)
try:
pokemon_q = db.pokemon_query(name, form=form)
# Need to eagerload some, uh, little stuff
pokemon_q = pokemon_q.options(
eagerload('evolution_chain.pokemon'),
eagerload('generation'),
eagerload('items.item'),
eagerload('items.version'),
eagerload('pokemon_color'),
eagerload('pokemon_habitat'),
eagerload('shape'),
subqueryload_all('stats.stat'),
subqueryload_all('types.target_efficacies.damage_type'),
# XXX SQLAlchemy totally barfs if I try to eagerload things
# that are only on the normal_form. No idea why. This
# includes: dex_numbers, foreign_names, flavor_text
)
# Alright, execute
c.pokemon = pokemon_q.one()
except NoResultFound:
return self._not_found()
# Some Javascript
c.javascripts.append(('pokedex', 'pokemon'))
### Previous and next for the header
c.prev_pokemon, c.next_pokemon = self._prev_next_pokemon(c.pokemon)
# Let's cache this bitch
return self.cache_content(
key=u';'.join([c.pokemon.name, c.pokemon.forme_name or u'']),
template='/pokedex/pokemon.mako',
do_work=self._do_pokemon,
)
def _do_pokemon(self, name_plus_form):
name, form = name_plus_form.split(u';')
if not form:
form = None
### Type efficacy
c.type_efficacies = defaultdict(lambda: 100)
for target_type in c.pokemon.types:
for type_efficacy in target_type.target_efficacies:
c.type_efficacies[type_efficacy.damage_type] *= \
type_efficacy.damage_factor
# The defaultdict starts at 100, and every damage factor is
# a percentage. Dividing by 100 with every iteration turns the
# damage factor into a decimal percentage taken of the starting
# 100, without using floats and regardless of number of types
c.type_efficacies[type_efficacy.damage_type] //= 100
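# Worked example: a Ghost-type move against a Normal/Ghost dual type
# starts at 100, is multiplied by 0 for Normal (100 * 0 // 100 = 0) and
# by 200 for Ghost (0 * 200 // 100 = 0), so immunities survive the
# integer math; likewise two 200s yield 400 for a 4x weakness.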
### Breeding compatibility
# To simplify this list considerably, we want to find the BASE FORM of
# every Pokémon compatible with this one. The base form is either:
# - a Pokémon that has breeding groups and no evolution parent, or
# - a Pokémon whose parent has no breeding groups (i.e. 15 only)
# and no evolution parent.
# The below query self-joins `pokemon` to itself and tests the above
# conditions.
# ASSUMPTION: Every base-form Pokémon in a breedable family can breed.
# ASSUMPTION: Every family has the same breeding groups throughout.
if c.pokemon.gender_rate == -1:
# Genderless; Ditto only
ditto = db.pokedex_session.query(tables.Pokemon) \
.filter_by(name=u'Ditto').one()
c.compatible_families = [ditto]
elif c.pokemon.egg_groups[0].id == 15:
# No Eggs group
pass
else:
parent_a = aliased(tables.Pokemon)
grandparent_a = aliased(tables.Pokemon)
egg_group_ids = [_.id for _ in c.pokemon.egg_groups]
q = db.pokedex_session.query(tables.Pokemon)
q = q.join(tables.PokemonEggGroup) \
.outerjoin((parent_a, tables.Pokemon.parent_pokemon)) \
.outerjoin((grandparent_a, parent_a.parent_pokemon)) \
.filter(tables.Pokemon.gender_rate != -1) \
.filter(tables.Pokemon.forme_base_pokemon_id == None) \
.filter(
# This is a "base form" iff either:
or_(
# This is the root form (no parent)
# (It has to be breedable too, but we're filtering by
# an egg group so that's granted)
parent_a.id == None,
# Or this can breed and evolves from something that
# can't
and_(parent_a.egg_groups.any(id=15),
grandparent_a.id == None),
)
) \
.filter(tables.PokemonEggGroup.egg_group_id.in_(egg_group_ids)) \
.order_by(tables.Pokemon.id)
c.compatible_families = q.all()
### Wild held items
# Stored separately per version due to *rizer shenanigans (grumble).
# Items also sometimes change over version groups within a generation.
# So in some 99.9% of cases we want to merge them to some extent,
# usually collapsing an entire version group or an entire generation.
# Thus we store these as:
# generation => { (version, ...) => [ (item, rarity), ... ] }
# In the case of all versions within a generation being merged, the
# key is None instead of a tuple of version objects.
c.held_items = {}
# First group by the things we care about
# n.b.: the keys are tuples of versions, not individual versions!
version_held_items = {}
# Preload with a list of versions so we know which ones are empty
generations = db.pokedex_session.query(tables.Generation) \
.options( eagerload('versions') ) \
.filter(tables.Generation.id >= max(3, c.pokemon.generation.id))
for generation in generations:
version_held_items[generation] = {}
for version in generation.versions:
version_held_items[generation][version,] = []
for pokemon_item in c.pokemon.items:
generation = pokemon_item.version.generation
version_held_items[generation][pokemon_item.version,] \
.append((pokemon_item.item, pokemon_item.rarity))
# Then group these into the form above
for generation, gen_held_items in version_held_items.items():
# gen_held_items: { (versions...): [(item, rarity)...] }
# Group by item, rarity, sorted by version...
inverted_held_items = defaultdict(tuple)
for version_tuple, item_rarity_list in \
sorted(gen_held_items.items(), key=lambda (k, v): k[0].id):
inverted_held_items[tuple(item_rarity_list)] += version_tuple
# Then flip back to versions as keys
c.held_items[generation] = {}
for item_rarity_tuple, version_tuple in inverted_held_items.items():
c.held_items[generation][version_tuple] = item_rarity_tuple
### Evolution
# Format is a matrix as follows:
# [
# [ None, Eevee, Vaporeon, None ]
# [ None, None, Jolteon, None ]
# [ None, None, Flareon, None ]
# ... etc ...
# ]
# That is, each row is a physical row in the resulting table, and each
# contains four elements, one per row: Baby, Base, Stage 1, Stage 2.
# The Pokémon are actually dictionaries with 'pokemon' and 'span' keys,
# where the span is used as the HTML cell's rowspan -- e.g., Eevee has a
# total of seven descendents, so it would need to span 7 rows.
c.evolution_table = []
family = c.pokemon.evolution_chain.pokemon
# Prefetch the evolution details
db.pokedex_session.query(tables.Pokemon) \
.filter(tables.Pokemon.id.in_(_.id for _ in family)) \
.options(
eagerload_all('parent_evolution.trigger'),
eagerload_all('parent_evolution.trigger_item'),
eagerload_all('parent_evolution.held_item'),
eagerload_all('parent_evolution.location'),
eagerload_all('parent_evolution.known_move'),
eagerload_all('parent_evolution.party_pokemon'),
) \
.all()
# Strategy: build this table going backwards.
# Find a leaf, build the path going back up to its root. Remember all
# of the nodes seen along the way. Find another leaf not seen so far.
# Build its path backwards, sticking it to a seen node if one exists.
# Repeat until there are no unseen nodes.
seen_nodes = {}
while True:
# First, find some unseen nodes
unseen_leaves = []
for pokemon in family:
if pokemon in seen_nodes:
continue
children = []
# A Pokémon is a leaf if it has no evolutionary children, so...
for possible_child in family:
if possible_child in seen_nodes:
continue
if possible_child.parent_pokemon == pokemon:
children.append(possible_child)
if len(children) == 0:
unseen_leaves.append(pokemon)
# If there are none, we're done! Bail.
# Note that it is impossible to have any unseen non-leaves if there
# are no unseen leaves; every leaf's ancestors become seen when we
# build a path to it.
if len(unseen_leaves) == 0:
break
# Sort by id, then by forme if any. This keeps evolutions in about
# the order people expect, while clustering formes together.
unseen_leaves.sort(key=lambda x: (x.national_id, x.forme_name))
leaf = unseen_leaves[0]
# root, parent_n, ... parent2, parent1, leaf
current_path = []
# Finally, go back up the tree to the root
current_pokemon = leaf
while current_pokemon:
# The loop bails just after current_pokemon is no longer the
# root, so this will give us the root after the loop ends;
# we need to know if it's a baby to see whether to indent the
# entire table below
root_pokemon = current_pokemon
if current_pokemon in seen_nodes:
current_node = seen_nodes[current_pokemon]
# Don't need to repeat this node; the first instance will
# have a rowspan
current_path.insert(0, None)
else:
current_node = {
'pokemon': current_pokemon,
'span': 0,
}
current_path.insert(0, current_node)
seen_nodes[current_pokemon] = current_node
# This node has one more row to span: our current leaf
current_node['span'] += 1
current_pokemon = current_pokemon.parent_pokemon
# We want every path to have four nodes: baby, basic, stage 1 and 2.
# Every root node is basic, unless it's defined as being a baby.
# So first, add an empty baby node at the beginning if this is not
# a baby.
# We use an empty string to indicate an empty cell, as opposed to a
# complete lack of cell due to a tall cell from an earlier row.
if not root_pokemon.is_baby:
current_path.insert(0, '')
# Now pad to four if necessary.
while len(current_path) < 4:
current_path.append('')
c.evolution_table.append(current_path)
### Stats
# This takes a lot of queries :(
c.stats = {} # stat_name => { border, background, percentile }
# (also 'value' for total)
stat_total = 0
total_stat_rows = db.pokedex_session.query(tables.PokemonStat) \
.filter_by(stat=c.pokemon.stats[0].stat) \
.count()
physical_attack = None
special_attack = None
for pokemon_stat in c.pokemon.stats:
stat_info = c.stats[pokemon_stat.stat.name] = {}
stat_total += pokemon_stat.base_stat
q = db.pokedex_session.query(tables.PokemonStat) \
.filter_by(stat=pokemon_stat.stat)
less = q.filter(tables.PokemonStat.base_stat < pokemon_stat.base_stat) \
.count()
equal = q.filter(tables.PokemonStat.base_stat == pokemon_stat.base_stat) \
.count()
percentile = (less + equal * 0.5) / total_stat_rows
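# e.g. with 600 rows total, 300 strictly lower and 10 equal, this gives
# (300 + 5) / 600 ~= 0.508; counting ties as half keeps mirror-image
# stats symmetric around the 0.5 mark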
stat_info['percentile'] = percentile
# Colors for the stat bars, based on percentile
stat_info['background'] = bar_color(percentile, 0.9)
stat_info['border'] = bar_color(percentile, 0.8)
c.better_damage_class = c.pokemon.better_damage_class
# Percentile for the total
# Need to make a derived table that fakes pokemon_id, total_stats
stat_sum_tbl = db.pokedex_session.query(
func.sum(tables.PokemonStat.base_stat).label('stat_total')
) \
.group_by(tables.PokemonStat.pokemon_id) \
.subquery()
q = db.pokedex_session.query(stat_sum_tbl)
less = q.filter(stat_sum_tbl.c.stat_total < stat_total).count()
equal = q.filter(stat_sum_tbl.c.stat_total == stat_total).count()
percentile = (less + equal * 0.5) / total_stat_rows
c.stats['total'] = {
'percentile': percentile,
'value': stat_total,
'background': bar_color(percentile, 0.9),
'border': bar_color(percentile, 0.8),
}
### Sizing
c.trainer_height = pokedex_helpers.trainer_height
c.trainer_weight = pokedex_helpers.trainer_weight
heights = dict(pokemon=c.pokemon.height, trainer=c.trainer_height)
c.heights = pokedex_helpers.scale_sizes(heights)
# Strictly speaking, weight takes three dimensions. But the real
# measurement here is just "space taken up", and these are sprites, so
# the space they actually take up is two-dimensional.
weights = dict(pokemon=c.pokemon.weight, trainer=c.trainer_weight)
c.weights = pokedex_helpers.scale_sizes(weights, dimensions=2)
### Encounters -- briefly
# One row per version, then a list of places the Pokémon appears.
# version => terrain => location_area => conditions => CombinedEncounters
c.locations = defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
CombinedEncounter
)
)
)
)
q = db.pokedex_session.query(tables.Encounter) \
.filter_by(pokemon=c.pokemon) \
.options(
eagerload_all('condition_value_map.condition_value'),
eagerload_all('version'),
eagerload_all('slot.terrain'),
eagerload_all('location_area.location'),
)
for encounter in q:
condition_values = [cv for cv in encounter.condition_values
if not cv.is_default]
c.locations[encounter.version] \
[encounter.slot.terrain] \
[encounter.location_area] \
[tuple(condition_values)].combine_with(encounter)
# Strip each version+location down to just the condition values that
# are the most common per terrain
# Results in:
# version => location_area => terrain => (conditions, combined_encounter)
for version, terrain_etc in c.locations.items():
for terrain, area_condition_encounters \
in terrain_etc.items():
for location_area, condition_encounters \
in area_condition_encounters.items():
# Sort these by rarity
condition_encounter_items = condition_encounters.items()
condition_encounter_items.sort(
key=lambda (conditions, combined_encounter):
combined_encounter.rarity
)
# Use the last one, which is most common
area_condition_encounters[location_area] \
= condition_encounter_items[-1]
# Used for prettiness
c.encounter_terrain_icons = self.encounter_terrain_icons
### Moves
# Oh no.
# Moves are grouped by method.
# Within a method is a list of move rows.
# A move row contains a level or other status per version group, plus
# a move id.
# Thus: ( method, [ (move, { version_group => data, ... }), ... ] )
# First, though, we make a dictionary for quick access to each method's
# list.
# "data" is a dictionary of whatever per-version information is
# appropriate for this move method, such as a TM number or level.
move_methods = defaultdict(list)
# Grab the rows with a manual query so we can sort them in about the
# order they go in the table. This should keep it as compact as
# possible. Levels go in level order, and machines go in TM number
# order
q = db.pokedex_session.query(tables.PokemonMove) \
.filter_by(pokemon_id=c.pokemon.id) \
.outerjoin((tables.Machine, tables.PokemonMove.machine)) \
.options(
contains_eager(tables.PokemonMove.machine),
eagerload_all('move.damage_class'),
eagerload_all('move.move_effect'),
eagerload_all('move.type'),
eagerload_all('version_group'),
) \
.order_by(tables.PokemonMove.level.asc(),
tables.Machine.machine_number.asc(),
tables.PokemonMove.order.asc(),
tables.PokemonMove.version_group_id.asc()) \
.all()
for pokemon_move in q:
method_list = move_methods[pokemon_move.method]
this_vg = pokemon_move.version_group
# Create a container for data for this method and version(s)
vg_data = dict()
# TMs need to know their own TM number
if pokemon_move.method.name == 'Machine':
vg_data['machine'] = pokemon_move.machine.machine_number
# Find the best place to insert a row.
# In general, we just want the move names in order, so we can just
# tack rows on and sort them at the end. However! Level-up moves
# must stay in the same order within a version group, and TMs are
# similarly ordered by number. So we have to do some special
# ordering here.
# These two vars are the boundaries of where we can find or insert
# a new row. Only level-up and machine moves have these restrictions
lower_bound = None
upper_bound = None
if pokemon_move.method.name in ('Level up', 'Machine'):
vg_data['sort'] = (pokemon_move.level,
vg_data.get('machine', None),
pokemon_move.order)
vg_data['level'] = pokemon_move.level
# Find the next-lowest and next-highest rows. Our row must fit
# between those
for i, (move, version_group_data) in enumerate(method_list):
if this_vg not in version_group_data:
# Can't be a bound; not related to this version!
continue
if version_group_data[this_vg]['sort'] > vg_data['sort']:
if not upper_bound or i < upper_bound:
upper_bound = i
if version_group_data[this_vg]['sort'] < vg_data['sort']:
if not lower_bound or i > lower_bound:
lower_bound = i
# We're using Python's slice syntax, which includes the lower bound
# and excludes the upper. But we want to exclude both, so bump the
# lower bound
if lower_bound != None:
lower_bound += 1
# Check for a free existing row for this move; if one exists, we
# can just add our data to that same row.
# It's also possible that an existing row for this move can be
# shifted forwards into our valid range, if there are no
# intervening rows with levels in the same version groups that that
# row has. This is unusual, but happens when a lot of moves have
# been shuffled around multiple times, like with Pikachu
valid_row = None
for i, table_row in enumerate(method_list[0:upper_bound]):
move, version_group_data = table_row
# If we've already found a row for version X outside our valid
# range but run across another row with a level for X, that row
# cannot be moved up, so it's not usable
if valid_row and set(valid_row[1].keys()).intersection(
set(version_group_data.keys())):
valid_row = None
if move == pokemon_move.move \
and this_vg not in version_group_data:
valid_row = table_row
# If we're inside the valid range, just take the first row
# we find. If we're outside it, we want the last possible
# row to avoid shuffling the table too much. So only break
# if this row is inside lb/ub
if i >= lower_bound:
break
if valid_row:
if method_list.index(valid_row) < lower_bound:
# Move the row up if necessary
method_list.remove(valid_row)
method_list.insert(lower_bound, valid_row)
valid_row[1][this_vg] = vg_data
continue
# Otherwise, just make a new row and stuff it in.
# Rows are sorted by level before version group. If we see move X
# for a level, then move Y for another game, then move X for that
# other game, the two X's should be able to collapse. Thus we put
# the Y before the first X to leave space for the second X -- that
# is, add new rows as early in the list as possible
new_row = pokemon_move.move, { this_vg: vg_data }
method_list.insert(lower_bound or 0, new_row)
# Convert dictionary to our desired list of tuples
c.moves = move_methods.items()
c.moves.sort(key=_pokemon_move_method_sort_key)
# Sort non-level moves by name
for method, method_list in c.moves:
if method.name in ('Level up', 'Machine'):
continue
method_list.sort(key=lambda (move, version_group_data): move.name)
# Finally, collapse identical columns within the same generation
c.move_columns \
= _collapse_pokemon_move_columns(table=c.moves, thing=c.pokemon)
# Grab list of all the version groups with tutor moves
c.move_tutor_version_groups = _move_tutor_version_groups(c.moves)
return
def pokemon_flavor(self, name):
try:
c.pokemon = db.pokemon_query(name).one()
except NoResultFound:
return self._not_found()
# Deal with forms. Remember, this could be either a physical form or
# an aesthetic form!
c.form = request.params.get('form', None)
form_sprites = c.pokemon.form_sprites
# If we don't have a form name, but this Pokémon has forms, we need to
# know the default
if not c.form and c.pokemon.forme_name:
# If there's a physical form name, just use that. Don't redirect,
# as the physical form name is universally treated as the "default"
# and thus interchangeable with the plain Pokémon name -- that is,
# Normal Deoxys will always be /dex/pokemon/deoxys and vice versa
c.form = c.pokemon.forme_name
elif not c.form and \
form_sprites and \
not any(_.name == '' for _ in form_sprites) and \
c.pokemon.default_form_sprite.name:
# If there are aesthetic forms, but not one without a name, and we
# didn't GET a name, then redirect to the default. In this case,
# you can't see flavor for "just Unown"; there's no such thing.
# You have to pick one, and if you don't, then I'll pick one for
# you
redirect(url.current(form=c.pokemon.default_form_sprite.name))
c.forms = [_.name for _ in c.pokemon.form_sprites]
c.forms.sort()
# Every form should have a recorded sprite; find it
if c.form:
try:
spr_form = db.pokedex_session \
.query(tables.PokemonFormSprite) \
.filter_by(pokemon_id=c.pokemon.id, name=c.form) \
.one()
except NoResultFound:
# Not a valid form!
abort(404)
c.introduced_in = spr_form.introduced_in
else:
c.introduced_in = c.pokemon.generation.version_groups[0]
# Figure out if a sprite form appears in the overworld. If this isn't
# a sprite form, the answer is obviously yes
c.appears_in_overworld = True
default_form_sprite = c.pokemon.default_form_sprite
if c.pokemon.form_group and c.pokemon.form_group.is_battle_only \
and default_form_sprite and c.form != default_form_sprite.name:
# That is, if this Pokémon's forms are battle-only and this one isn't
# the default, it never shows up in the overworld
c.appears_in_overworld = False
### Previous and next for the header
c.prev_pokemon, c.next_pokemon = self._prev_next_pokemon(c.pokemon)
### Sizing
c.trainer_height = pokedex_helpers.trainer_height
c.trainer_weight = pokedex_helpers.trainer_weight
c.pokemon_height = c.pokemon.height
c.pokemon_weight = c.pokemon.weight
# Forms with separate Pokémon records sometimes differ
# XXX This kinda sucks, but it'll do until we fix Pokémon forms
if c.form:
for form in c.pokemon.formes:
if form.forme_name == c.form:
c.pokemon_height = form.height
c.pokemon_weight = form.weight
break
heights = dict(pokemon=c.pokemon_height, trainer=c.trainer_height)
c.heights = pokedex_helpers.scale_sizes(heights)
# Strictly speaking, weight takes three dimensions. But the real
# measurement here is just "space taken up", and these are sprites, so
# the space they actually take up is two-dimensional.
weights = dict(pokemon=c.pokemon_weight, trainer=c.trainer_weight)
c.weights = pokedex_helpers.scale_sizes(weights, dimensions=2)
return render('/pokedex/pokemon_flavor.mako')
def pokemon_locations(self, name):
"""Spits out a page listing detailed location information for this
Pokémon.
"""
try:
c.pokemon = db.pokemon(name)
except NoResultFound:
return self._not_found()
### Previous and next for the header
c.prev_pokemon, c.next_pokemon = self._prev_next_pokemon(c.pokemon)
# Cache it yo
return self.cache_content(
key=c.pokemon.name,
template='/pokedex/pokemon_locations.mako',
do_work=self._do_pokemon_locations,
)
def _do_pokemon_locations(self, name):
# For the most part, our data represents exactly what we're going to
# show. For a given area in a given game, this Pokémon is guaranteed
# to appear some x% of the time no matter what the state of the world
# is, and various things like swarms or the radar may add on to this
# percentage.
# Encounters are grouped by region -- <h1>s.
# Then by terrain -- table sections.
# Then by area -- table rows.
# Then by version -- table columns.
# Finally, condition values associated with levels/rarity.
q = db.pokedex_session.query(tables.Encounter) \
.options(
eagerload_all('condition_value_map.condition_value'),
eagerload_all('version'),
eagerload_all('slot.terrain'),
eagerload_all('location_area.location'),
)\
.filter(tables.Encounter.pokemon == c.pokemon)
# region => terrain => area => version => condition =>
# condition_values => encounter_bits
grouped_encounters = defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
list
)
)
)
)
)
)
# Locations cluster by region, primarily to avoid having a lot of rows
# where one version group or the other is blank; that doesn't make for
# fun reading. To put the correct version headers in each region
# table, we need to know what versions correspond to which regions.
# Normally, this can be done by examining region.version_groups.
# However, some regions (Kanto) appear in a ridiculous number of games.
# To avoid an ultra-wide table when not necessary, only *generations*
# that actually contain this Pokémon should appear.
# So if the Pokémon appears in Kanto in Crystal, show all of G/S/C. If
# it doesn't appear in any of the three, show none of them.
# Last but not least, show generations in reverse order, so the more
# important (i.e., recent) versions are on the left.
# Got all that?
region_generations = defaultdict(set)
for encounter in q.all():
# Fetches the list of encounters that match this region, version,
# terrain, etc.
region = encounter.location_area.location.region
# n.b.: conditions and values must be tuples because lists aren't
# hashable.
encounter_bits = grouped_encounters \
[region] \
[encounter.slot.terrain] \
[encounter.location_area] \
[encounter.version] \
[ tuple(cv.condition for cv in encounter.condition_values) ] \
[ tuple(encounter.condition_values) ]
# Combine "level 3-4, 50%" and "level 3-4, 20%" into "level 3-4, 70%".
existing_encounter = filter(lambda enc: enc['min_level'] == encounter.min_level
and enc['max_level'] == encounter.max_level,
encounter_bits)
if existing_encounter:
existing_encounter[0]['rarity'] += encounter.slot.rarity
else:
encounter_bits.append({
'min_level': encounter.min_level,
'max_level': encounter.max_level,
'rarity': encounter.slot.rarity,
})
# Remember that this generation appears in this region
region_generations[region].add(encounter.version.version_group.generation)
c.grouped_encounters = grouped_encounters
# Pass some data/functions
c.encounter_terrain_icons = self.encounter_terrain_icons
c.encounter_condition_value_icons = self.encounter_condition_value_icons
c.level_range = level_range
# See above. Versions for each region are those in that region that
# are part of a generation where this Pokémon appears -- in reverse
# generation order.
c.region_versions = defaultdict(list)
for region, generations in region_generations.items():
for version_group in region.version_groups:
if version_group.generation not in generations:
continue
c.region_versions[region][0:0] = version_group.versions
return
def moves_list(self):
return render('/pokedex/move_list.mako')
def moves(self, name):
try:
c.move = db.get_by_name_query(tables.Move, name).one()
except NoResultFound:
return self._not_found()
### Prev/next for header
max_id = db.pokedex_session.query(tables.Move).count()
c.prev_move = db.pokedex_session.query(tables.Move).get(
(c.move.id - 1 - 1) % max_id + 1)
c.next_move = db.pokedex_session.query(tables.Move).get(
(c.move.id - 1 + 1) % max_id + 1)
return self.cache_content(
key=c.move.name,
template='/pokedex/move.mako',
do_work=self._do_moves,
)
def _do_moves(self, name):
# Eagerload
db.pokedex_session.query(tables.Move) \
.filter_by(id=c.move.id) \
.options(
eagerload('damage_class'),
eagerload('type'),
eagerload('target'),
eagerload('move_effect'),
eagerload('move_effect.category_map.category'),
eagerload('contest_effect'),
eagerload('contest_type'),
eagerload('super_contest_effect'),
subqueryload_all('move_flags.flag'),
subqueryload_all('type.damage_efficacies.target_type'),
subqueryload_all('foreign_names.language'),
subqueryload_all('flavor_text.version_group.generation'),
subqueryload_all('flavor_text.version_group.versions'),
subqueryload_all('contest_combo_first.second'),
subqueryload_all('contest_combo_second.first'),
subqueryload_all('super_contest_combo_first.second'),
subqueryload_all('super_contest_combo_second.first'),
) \
.one()
# Used for item linkage
c.pp_up = db.pokedex_session.query(tables.Item) \
.filter_by(name=u'PP Up').one()
### Type efficacy
c.type_efficacies = {}
for type_efficacy in c.move.type.damage_efficacies:
c.type_efficacies[type_efficacy.target_type] = \
type_efficacy.damage_factor
### Power percentile
if c.move.power in (0, 1):
c.power_percentile = None
else:
q = db.pokedex_session.query(tables.Move) \
.filter(tables.Move.power > 1)
less = q.filter(tables.Move.power < c.move.power).count()
equal = q.filter(tables.Move.power == c.move.power).count()
c.power_percentile = (less + equal * 0.5) / q.count()
### Flags
c.flags = []
move_flags = db.pokedex_session.query(tables.MoveFlagType) \
.order_by(tables.MoveFlagType.id.asc())
for flag in move_flags:
has_flag = flag in c.move.flags
c.flags.append((flag, has_flag))
### Machines
q = db.pokedex_session.query(tables.Generation) \
.filter(tables.Generation.id >= c.move.generation.id) \
.options(
eagerload('version_groups'),
) \
.order_by(tables.Generation.id.asc())
raw_machines = {}
# raw_machines = { generation: { version_group: machine_number } }
c.machines = {}
# c.machines: generation => [ (versions, machine_number), ... ]
# Populate an empty dict first so we know which versions don't have a
# TM for this move
for generation in q:
c.machines[generation] = []
raw_machines[generation] = {}
for version_group in generation.version_groups:
raw_machines[generation][version_group] = None
# Fetch the actual machine numbers
for machine in c.move.machines:
raw_machines[machine.version_group.generation] \
[machine.version_group] = machine.machine_number
# Collapse that into an easily-displayed form
VersionMachine = namedtuple('VersionMachine',
['version_group', 'machine_number'])
# dictionary -> list of tuples
for generation, vg_numbers in raw_machines.items():
for version_group, machine_number in vg_numbers.items():
c.machines[generation].append(
VersionMachine(version_group=version_group,
machine_number=machine_number,
)
)
for generation, vg_numbers in c.machines.items():
machine_numbers = [_.machine_number for _ in vg_numbers]
if len(set(machine_numbers)) == 1:
# Merge generations that have the same machine number everywhere
c.machines[generation] = [( None, vg_numbers[0].machine_number )]
else:
# Otherwise, sort by version group
vg_numbers.sort(key=lambda item: item.version_group.id)
### Similar moves
c.similar_moves = db.pokedex_session.query(tables.Move) \
.join(tables.Move.move_effect) \
.filter(tables.MoveEffect.id == c.move.effect_id) \
.filter(tables.Move.id != c.move.id) \
.options(eagerload('type')) \
.all()
### Pokémon
# This is kinda like the moves for Pokémon, but backwards. Imagine
# that! We have the same basic structure, a list of:
# (method, [ (pokemon, { version_group => data, ... }), ... ])
pokemon_methods = defaultdict(dict)
# Sort by descending level because the LAST level seen is the one that
# ends up in the table, and the lowest level is the most useful
q = db.pokedex_session.query(tables.PokemonMove) \
.options(
eagerload('method'),
eagerload('pokemon'),
eagerload('version_group'),
eagerload('pokemon.form_group'),
eagerload('pokemon.stats.stat'),
eagerload('pokemon.stats.stat.damage_class'),
# Pokémon table stuff
subqueryload('pokemon.abilities'),
subqueryload('pokemon.egg_groups'),
subqueryload('pokemon.formes'),
subqueryload('pokemon.stats'),
subqueryload('pokemon.types'),
) \
.filter(tables.PokemonMove.move_id == c.move.id) \
.order_by(tables.PokemonMove.level.desc())
for pokemon_move in q:
method_list = pokemon_methods[pokemon_move.method]
this_vg = pokemon_move.version_group
# Create a container for data for this method and version(s)
vg_data = dict()
if pokemon_move.method.name == 'Level up':
# Level-ups need to know what level
vg_data['level'] = pokemon_move.level
elif pokemon_move.method.name == 'Machine':
# TMs need to know their own TM number
machine = first(lambda _: _.version_group == this_vg,
c.move.machines)
if machine:
vg_data['machine'] = machine.machine_number
# The Pokémon version does sorting here, but we're just going to
# sort by name regardless of method, so leave that until last
# Add in the move method for this Pokémon
if pokemon_move.pokemon not in method_list:
method_list[pokemon_move.pokemon] = dict()
method_list[pokemon_move.pokemon][this_vg] = vg_data
# Convert each method dictionary to a list of tuples
c.better_damage_classes = {}
for method in pokemon_methods.keys():
# Also grab Pokémon's better damage classes
for pokemon in pokemon_methods[method].keys():
if pokemon not in c.better_damage_classes:
c.better_damage_classes[pokemon] = \
pokemon.better_damage_class
pokemon_methods[method] = pokemon_methods[method].items()
# Convert the entire dictionary to a list of tuples and sort it
c.pokemon = pokemon_methods.items()
c.pokemon.sort(key=_pokemon_move_method_sort_key)
# Sort by Pokémon number
for method, method_list in c.pokemon:
method_list.sort(key=lambda (pokemon, whatever): (pokemon.national_id, pokemon.forme_name))
# Finally, collapse identical columns within the same generation
c.pokemon_columns \
= _collapse_pokemon_move_columns(table=c.pokemon, thing=c.move)
# Grab list of all the version groups with tutor moves
c.move_tutor_version_groups = _move_tutor_version_groups(c.pokemon)
return
def types_list(self):
c.types = db.pokedex_session.query(tables.Type) \
.order_by(tables.Type.name) \
.options(eagerload('damage_efficacies')) \
.all()
try:
c.secondary_type = db.pokedex_session.query(tables.Type) \
.filter(tables.Type.name == request.params['secondary']) \
.one()
c.secondary_efficacy = dict(
[(efficacy.damage_type, efficacy.damage_factor) for efficacy in c.secondary_type.target_efficacies]
)
except (KeyError, NoResultFound):
c.secondary_type = None
c.secondary_efficacy = defaultdict(lambda: 100)
# Count up a relative score for each type, both attacking and
# defending. Normal damage counts for 0; super effective counts for
# +1; not very effective counts for -1. Ineffective counts for -2.
# With dual types, x4 is +2 and x1/4 is -2; ineffective is -4.
# Everything is of course the other way around for defense.
attacking_score_conversion = {
400: +2,
200: +1,
100: 0,
50: -1,
25: -2,
0: -2,
}
if c.secondary_type:
attacking_score_conversion[0] = -4
c.attacking_scores = defaultdict(int)
c.defending_scores = defaultdict(int)
for attacking_type in c.types:
for efficacy in attacking_type.damage_efficacies:
defending_type = efficacy.target_type
factor = efficacy.damage_factor * \
c.secondary_efficacy[attacking_type] // 100
c.attacking_scores[attacking_type] += attacking_score_conversion[factor]
c.defending_scores[defending_type] -= attacking_score_conversion[factor]
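        # A worked example (hypothetical types): a 200 chart entry combined
        # with a secondary type that takes 0 damage from the attacker gives
        # factor = 200 * 0 // 100 = 0, scoring -4 for the attacker once a
        # secondary type is selected.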
return render('/pokedex/type_list.mako')
def types(self, name):
try:
c.type = db.get_by_name(tables.Type, name)
except NoResultFound:
return self._not_found()
### Prev/next for header
max_id = db.pokedex_session.query(tables.Type).count()
c.prev_type = db.pokedex_session.query(tables.Type).get(
(c.type.id - 1 - 1) % max_id + 1)
c.next_type = db.pokedex_session.query(tables.Type).get(
(c.type.id - 1 + 1) % max_id + 1)
return self.cache_content(
key=c.type.name,
template='/pokedex/type.mako',
do_work=self._do_types,
)
def _do_types(self, name):
# Eagerload a bit of type stuff
db.pokedex_session.query(tables.Type) \
.filter_by(id=c.type.id) \
.options(
subqueryload('damage_efficacies'),
joinedload('damage_efficacies.target_type'),
subqueryload('target_efficacies'),
joinedload('target_efficacies.damage_type'),
) \
.one()
c.moves = db.pokedex_session.query(tables.Move) \
.filter_by(type_id=c.type.id) \
.order_by(tables.Move.name.asc()) \
.options(
joinedload('damage_class'),
joinedload('generation'),
joinedload('move_effect'),
joinedload('type'),
)
c.pokemon = db.pokedex_session.query(tables.Pokemon) \
.join(tables.PokemonType) \
.filter(tables.PokemonType.type_id == c.type.id) \
.options(
subqueryload('abilities'),
subqueryload('egg_groups'),
subqueryload('types'),
subqueryload_all('stats.stat'),
)
c.pokemon = sorted(c.pokemon, key=lambda (pokemon): (pokemon.national_id, pokemon.forme_name))
return
    def abilities_list(self):
c.abilities = db.pokedex_session.query(tables.Ability) \
.order_by(tables.Ability.id) \
.all()
return render('/pokedex/ability_list.mako')
def abilities(self, name):
try:
c.ability = db.get_by_name(tables.Ability, name)
except NoResultFound:
return self._not_found()
### Prev/next for header
max_id = db.pokedex_session.query(tables.Ability).count()
c.prev_ability = db.pokedex_session.query(tables.Ability).get(
(c.ability.id - 1 - 1) % max_id + 1)
c.next_ability = db.pokedex_session.query(tables.Ability).get(
(c.ability.id - 1 + 1) % max_id + 1)
return self.cache_content(
key=c.ability.name,
template='/pokedex/ability.mako',
do_work=self._do_ability,
)
def _do_ability(self, name):
# Eagerload
db.pokedex_session.query(tables.Ability) \
.filter_by(id=c.ability.id) \
.options(
subqueryload('foreign_names'),
joinedload('foreign_names.language'),
subqueryload('flavor_text'),
joinedload('flavor_text.version_group'),
joinedload('flavor_text.version_group.versions'),
) \
.one()
c.pokemon = db.pokedex_session.query(tables.Pokemon) \
.join(tables.PokemonAbility) \
.filter(tables.PokemonAbility.ability_id == c.ability.id) \
.options(
subqueryload('abilities'),
subqueryload('egg_groups'),
subqueryload('types'),
subqueryload_all('stats.stat'),
)
c.pokemon = sorted(c.pokemon, key=lambda (pokemon): (pokemon.national_id, pokemon.forme_name))
return
def items_list(self):
c.item_pockets = db.pokedex_session.query(tables.ItemPocket) \
.order_by(tables.ItemPocket.id.asc())
return render('/pokedex/item_list.mako')
def item_pockets(self, pocket):
try:
c.item_pocket = db.pokedex_session.query(tables.ItemPocket) \
.filter(tables.ItemPocket.identifier == pocket) \
.options(eagerload_all('categories.items.berry')) \
.one()
except NoResultFound:
# It's possible this is an old item URL; redirect if so
try:
item = db.get_by_name(tables.Item, pocket)
return redirect(
url(controller='dex', action='items',
pocket=item.pocket.identifier, name=pocket),
)
except NoResultFound:
return self._not_found()
# OK, got a valid pocket
# Eagerload TM info if it's actually needed
if c.item_pocket.identifier == u'machines':
db.pokedex_session.query(tables.ItemPocket) \
.options(eagerload_all('categories.items.machines.move.type')) \
.get(c.item_pocket.id)
c.item_pockets = db.pokedex_session.query(tables.ItemPocket) \
.order_by(tables.ItemPocket.id.asc())
return render('/pokedex/item_pockets.mako')
def items(self, pocket, name):
try:
c.item = db.get_by_name(tables.Item, name)
except NoResultFound:
return self._not_found()
# These are used for their item linkage
c.growth_mulch = db.pokedex_session.query(tables.Item) \
.filter_by(name=u'Growth Mulch').one()
c.damp_mulch = db.pokedex_session.query(tables.Item) \
.filter_by(name=u'Damp Mulch').one()
# Pokémon that can hold this item are per version; break this up into a
# two-dimensional structure of pokemon => version => rarity
c.holding_pokemon = defaultdict(lambda: defaultdict(int))
held_generations = set()
for pokemon_item in c.item.pokemon:
c.holding_pokemon[pokemon_item.pokemon][pokemon_item.version] = pokemon_item.rarity
held_generations.add(pokemon_item.version.generation)
# Craft a list of versions, collapsed into columns, grouped by gen
held_generations = sorted(held_generations, key=lambda gen: gen.id)
c.held_version_columns = []
for generation in held_generations:
# Oh boy! More version collapsing logic!
# Try to make this as simple as possible: have a running list of
# versions in some column, then switch to a new column when any
# rarity changes
c.held_version_columns.append( [[]] ) # New colgroup, empty column
last_version = None
for version in generation.versions:
                # If any of the rarities changed, this version needs to
                # begin a new column
if last_version and any(
rarities[last_version] != rarities[version]
for rarities in c.holding_pokemon.values()
):
c.held_version_columns[-1].append([])
c.held_version_columns[-1][-1].append(version)
last_version = version
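        # e.g. (hypothetical): if a holder's rarity differs between the first
        # and last versions of a generation, that generation's colgroup splits
        # into multiple columns; if every rarity matches, the whole generation
        # stays a single column of versions.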
return render('/pokedex/item.mako')
def locations(self, name):
# Note that it isn't against the rules for multiple locations to have
# the same name. To avoid complications, the name is stored in
# c.location_name, and after that we only deal with areas.
c.locations = db.pokedex_session.query(tables.Location) \
.filter(func.lower(tables.Location.name) == name) \
.all()
if not c.locations:
return self._not_found()
c.location_name = c.locations[0].name
# TODO: Sort locations/areas by generation
# Get all the areas in any of these locations
c.areas = []
for location in c.locations:
c.areas.extend(location.areas)
c.areas.sort(key=lambda area: area.name)
# For the most part, our data represents exactly what we're going to
# show. For a given area in a given game, this Pokémon is guaranteed
# to appear some x% of the time no matter what the state of the world
# is, and various things like swarms or the radar may add on to this
# percentage.
# Encounters are grouped by area -- <h2>s.
# Then by terrain -- table sections.
# Then by pokemon -- table rows.
# Then by version -- table columns.
# Finally, condition values associated with levels/rarity.
q = db.pokedex_session.query(tables.Encounter) \
.options(
eagerload_all('condition_value_map.condition_value'),
eagerload_all('slot.terrain'),
eagerload('pokemon'),
eagerload('version'),
) \
.filter(tables.Encounter.location_area_id.in_(_.id for _ in c.areas))
# area => terrain => pokemon => version => condition =>
# condition_values => encounter_bits
grouped_encounters = defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
list
)
)
)
)
)
)
# To avoid an ultra-wide table when not necessary, only *generations*
# that actually contain this Pokémon should appear.
# So if the Pokémon appears in Kanto in Crystal, show all of G/S/C. If
# it doesn't appear in any of the three, show none of them.
# Last but not least, show generations in reverse order, so the more
# important (i.e., recent) versions are on the left.
# Got all that?
area_generations = defaultdict(set)
for encounter in q.all():
# Fetches the list of encounters that match this region, version,
# terrain, etc.
# n.b.: conditions and values must be tuples because lists aren't
# hashable.
encounter_bits = grouped_encounters \
[encounter.location_area] \
[encounter.slot.terrain] \
[encounter.pokemon] \
[encounter.version] \
[ tuple(cv.condition for cv in encounter.condition_values) ] \
[ tuple(encounter.condition_values) ]
# Combine "level 3-4, 50%" and "level 3-4, 20%" into "level 3-4, 70%".
existing_encounter = filter(lambda enc: enc['min_level'] == encounter.min_level
and enc['max_level'] == encounter.max_level,
encounter_bits)
if existing_encounter:
existing_encounter[0]['rarity'] += encounter.slot.rarity
else:
encounter_bits.append({
'min_level': encounter.min_level,
'max_level': encounter.max_level,
'rarity': encounter.slot.rarity,
})
# Remember that this generation appears in this area
area_generations[encounter.location_area].add(encounter.version.version_group.generation)
c.grouped_encounters = grouped_encounters
# Pass some data/functions
c.encounter_terrain_icons = self.encounter_terrain_icons
c.encounter_condition_value_icons = self.encounter_condition_value_icons
c.level_range = level_range
# See above. Versions for each major group are those that are part of
# a generation where this Pokémon appears -- in reverse generation
# order.
c.group_versions = defaultdict(list)
for area, generations in area_generations.items():
for version_group in area.location.region.version_groups:
if version_group.generation not in generations:
continue
c.group_versions[area][0:0] = version_group.versions
return render('/pokedex/location.mako')
def natures_list(self):
c.natures = db.pokedex_session.query(tables.Nature)
# Figure out sort order
c.sort_order = request.params.get('sort', None)
if c.sort_order == u'stat':
# Sort neutral natures first, sorted by name, then the others in
# stat order
c.natures = c.natures.order_by(
(tables.Nature.increased_stat_id
== tables.Nature.decreased_stat_id).desc(),
tables.Nature.increased_stat_id.asc(),
tables.Nature.decreased_stat_id.asc(),
)
else:
c.natures = c.natures.order_by(tables.Nature.name.asc())
return render('/pokedex/nature_list.mako')
def natures(self, name):
try:
c.nature = db.get_by_name(tables.Nature, name)
except NoResultFound:
return self._not_found()
# Find related natures.
# Other neutral natures if this one is neutral; otherwise, the inverse
# of this one
if c.nature.increased_stat == c.nature.decreased_stat:
c.neutral_natures = db.pokedex_session.query(tables.Nature) \
.filter(tables.Nature.increased_stat_id
== tables.Nature.decreased_stat_id) \
.filter(tables.Nature.id != c.nature.id) \
.order_by(tables.Nature.name)
else:
c.inverse_nature = db.pokedex_session.query(tables.Nature) \
.filter_by(
increased_stat_id=c.nature.decreased_stat_id,
decreased_stat_id=c.nature.increased_stat_id,
) \
.one()
# Find appropriate example Pokémon.
# Arbitrarily decided that these are Pokémon for which:
# - their best and worst stats are at least 10 apart
# - their best stat is improved by this nature
# - their worst stat is hindered by this nature
# Of course, if this is a neutral nature, then find only Pokémon for
# which the best and worst stats are close together.
# The useful thing here is that this cannot be done in the Pokémon
# search, as it requires comparing a Pokémon's stats to themselves.
# Also, HP doesn't count. Durp.
hp = db.pokedex_session.query(tables.Stat).filter_by(name=u'HP').one()
if c.nature.increased_stat == c.nature.decreased_stat:
# Neutral. Boring!
# Create a subquery of neutral-ish Pokémon
stat_subquery = db.pokedex_session.query(
tables.PokemonStat.pokemon_id
) \
.filter(tables.PokemonStat.stat_id != hp.id) \
.group_by(tables.PokemonStat.pokemon_id) \
.having(
func.max(tables.PokemonStat.base_stat)
- func.min(tables.PokemonStat.base_stat)
<= 10
) \
.subquery()
c.pokemon = db.pokedex_session.query(tables.Pokemon) \
.join((stat_subquery,
stat_subquery.c.pokemon_id == tables.Pokemon.id))
else:
# More interesting.
# Create the subquery again, but.. the other way around.
grouped_stats = aliased(tables.PokemonStat)
stat_range_subquery = db.pokedex_session.query(
grouped_stats.pokemon_id,
func.max(grouped_stats.base_stat).label('max_stat'),
func.min(grouped_stats.base_stat).label('min_stat'),
) \
.filter(grouped_stats.stat_id != hp.id) \
.group_by(grouped_stats.pokemon_id) \
.having(
func.max(grouped_stats.base_stat)
- func.min(grouped_stats.base_stat)
> 10
) \
.subquery()
# Also need to join twice more to PokemonStat to figure out WHICH
# of those stats is the max or min. So, yes, joining to the same
# table three times and two deep. One to make sure the Pokémon has
# the right lowest stat; one to make sure it has the right highest
# stat.
# Note that I really want to do: range --> min; --> max
# But SQLAlchemy won't let me start from a subquery like that, so
# instead I do min --> range --> max. :( Whatever.
min_stats = aliased(tables.PokemonStat)
max_stats = aliased(tables.PokemonStat)
minmax_stat_subquery = db.pokedex_session.query(
min_stats
) \
.join((stat_range_subquery, and_(
min_stats.base_stat == stat_range_subquery.c.min_stat,
min_stats.pokemon_id == stat_range_subquery.c.pokemon_id,
)
)) \
.join((max_stats, and_(
max_stats.base_stat == stat_range_subquery.c.max_stat,
max_stats.pokemon_id == stat_range_subquery.c.pokemon_id,
)
)) \
.filter(min_stats.stat_id == c.nature.decreased_stat_id) \
.filter(max_stats.stat_id == c.nature.increased_stat_id) \
.subquery()
# Finally, just join that mess to pokemon; INNER-ness will do all
# the filtering
c.pokemon = db.pokedex_session.query(tables.Pokemon) \
.join((minmax_stat_subquery,
minmax_stat_subquery.c.pokemon_id == tables.Pokemon.id))
# Order by id as per usual
c.pokemon = c.pokemon.order_by(tables.Pokemon.id.asc())
return render('/pokedex/nature.mako')
| Sanqui/spline-pokedex | splinext/pokedex/controllers/pokedex.py | Python | mit | 78,359 | ["CRYSTAL"] | 7e450e46e0a43d59260ab0bd545fd0e105d2b13e8f0b4bc57090263bcb85ce7f |
########################################################################
# $HeadURL$
# File : BOINCComputingElement.py
# Author : J.Wu
########################################################################
""" BOINC Computing Element
"""
__RCSID__ = "$Id$"
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC import S_OK, S_ERROR
import os, bz2, base64, tempfile
from urlparse import urlparse
CE_NAME = 'BOINC'
class BOINCComputingElement( ComputingElement ):
###############################################################################
def __init__( self, ceUniqueID ):
""" Standard constructor.
"""
ComputingElement.__init__( self, ceUniqueID )
self.ceType = CE_NAME
self.mandatoryParameters = []
self.wsdl = None
self.BOINCClient = None
#define a job prefix based on the wsdl url
self.suffix = None
    # this is for standalone testing
# self.ceParameters['projectURL'] = 'http://mardirac3.in2p3.fr:7788/?wsdl'
# self.ceParameters['Platform'] = 'Linux_x86_64_glibc-2.5'
###############################################################################
def createClient( self ):
"""
    This method can only be called after this class has been initialised. It
    sets up some variables and creates a SOAP client for communication with
    the BOINC server.
"""
if not self.wsdl:
self.wsdl = self.ceParameters['projectURL']
if not self.suffix:
result = urlparse(self.wsdl)
self.suffix = result.hostname
if not self.BOINCClient:
try:
from suds.client import Client
import logging
logging.basicConfig(format="%(asctime)-15s %(message)s")
self.BOINCClient = Client(self.wsdl)
except Exception,x:
self.log.error( 'Creation of the soap client failed', '%s' % str( x ) )
pass
###############################################################################
def submitJob( self, executableFile, proxy = None, numberOfJobs = 1 ):
""" Method to submit job
"""
self.createClient( )
# Check if the client is ready
if not self.BOINCClient:
return S_ERROR( 'Soap client is not ready' )
self.log.verbose( "Executable file path: %s" % executableFile )
# if no proxy is supplied, the executable can be submitted directly
# otherwise a wrapper script is needed to get the proxy to the execution node
# The wrapper script makes debugging more complicated and thus it is
# recommended to transfer a proxy inside the executable if possible.
wrapperContent = ''
if proxy:
self.log.verbose( 'Setting up proxy for payload' )
compressedAndEncodedProxy = base64.encodestring( bz2.compress( proxy.dumpAllToString()['Value'] ) ).replace( '\n', '' )
compressedAndEncodedExecutable = base64.encodestring( bz2.compress( open( executableFile, "rb" ).read(), 9 ) ).replace( '\n', '' )
wrapperContent = """#!/bin/bash
/usr/bin/env python << EOF
# Wrapper script for executable and proxy
import os, tempfile, sys, base64, bz2, shutil
try:
workingDirectory = tempfile.mkdtemp( suffix = '_wrapper', prefix= 'TORQUE_' )
os.chdir( workingDirectory )
open( 'proxy', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedProxy)s" ) ) )
open( '%(executable)s', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedExecutable)s" ) ) )
os.chmod('proxy',0600)
os.chmod('%(executable)s',0700)
os.environ["X509_USER_PROXY"]=os.path.join(workingDirectory, 'proxy')
except Exception, x:
print >> sys.stderr, x
sys.exit(-1)
cmd = "./%(executable)s"
print 'Executing: ', cmd
sys.stdout.flush()
os.system( cmd )
shutil.rmtree( workingDirectory )
EOF
""" % { 'compressedAndEncodedProxy': compressedAndEncodedProxy, \
'compressedAndEncodedExecutable': compressedAndEncodedExecutable, \
'executable': os.path.basename( executableFile ) }
      # Persist the wrapper script so it can be read back below
      fd, name = tempfile.mkstemp( suffix = '_pilotwrapper.py', prefix = 'DIRAC_', dir = os.getcwd() )
      wrapperFile = os.fdopen( fd, 'w' )
      wrapperFile.write( wrapperContent )
      wrapperFile.close( )
      submitFile = name
else: # no proxy
submitFile = executableFile
wrapperContent = self._fromFileToStr( submitFile )
if not wrapperContent:
self.log.error( 'Executable file is empty.' )
return S_ERROR( 'Executable file is empty.' )
    # Some special symbols cannot be transported in XML (e.g. the less-than,
    # greater-than and ampersand characters), so base64 encoding is used here.
    wrapperContent = base64.encodestring( wrapperContent ).replace( "\n", '' )
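    # Round-trip sketch (an assumption about the server side, not shown here):
    # the service presumably restores the original script with
    # base64.decodestring( wrapperContent ) before execution.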
prefix = os.path.splitext( os.path.basename( submitFile ) )[0].replace( '_pilotwrapper', '' ).replace( 'DIRAC_', '' )
batchIDList = []
stampDict = {}
for i in range( 0, numberOfJobs ):
jobID = "%s_%d@%s" % ( prefix, i, self.suffix)
try:
# print jobID + "\n" + wrapperContent
# print self.BOINCClient
result = self.BOINCClient.service.submitJob( jobID, wrapperContent,self.ceParameters['Platform'][0], self.ceParameters['MarketPlaceID'] )
except:
self.log.error( 'Could not submit the pilot to the BOINC CE',
'Pilot %s, BOINC CE %s' % (jobID, self.wsdl ))
        break
if not result['ok']:
        self.log.warn( 'Failed to submit pilot %s to the BOINC CE %s, the value returned is false!' % (jobID,
                 self.wsdl ))
        break
      self.log.verbose( 'Submitted pilot %s to the BOINC CE %s' % (jobID, self.wsdl) )
diracStamp = "%s_%d" % ( prefix, i )
batchIDList.append( jobID )
stampDict[jobID] = diracStamp
if batchIDList:
resultRe = S_OK( batchIDList )
resultRe['PilotStampDict'] = stampDict
else:
      resultRe = S_ERROR( 'No pilots were submitted to the BOINC CE %s' % self.wsdl )
return resultRe
#############################################################################
def getCEStatus( self ):
""" Method to get the BONIC CE dynamic jobs information.
"""
self.createClient( )
# Check if the client is ready
if not self.BOINCClient:
self.log.error( 'Soap client is not ready.' )
return S_ERROR( 'Soap client is not ready.' )
try:
result = self.BOINCClient.service.getDynamicInfo( )
except:
self.log.error( 'Could not get the BOINC CE dynamic jobs information', self.wsdl )
return S_ERROR( 'Could not get the BOINC CE %s dynamic jobs information, communication failed!' % self.wsdl )
if not result['ok']:
      self.log.warn( 'Did not get the BOINC CE %s dynamic jobs information, the value returned is false!' % self.wsdl )
      return S_ERROR( 'Did not get the BOINC CE %s dynamic jobs information, the value returned is false!' % self.wsdl )
    self.log.verbose( 'Got the BOINC CE %s dynamic jobs info.' % self.wsdl )
resultRe = S_OK()
resultRe['WaitingJobs'] = result['values'][0][0]
resultRe['RunningJobs'] = result['values'][0][1]
resultRe['SubmittedJobs'] = 0
self.log.verbose( 'Waiting Jobs: ', resultRe['WaitingJobs'] )
self.log.verbose( 'Running Jobs: ', resultRe['RunningJobs'] )
return resultRe
#############################################################################
def getJobStatus( self, jobIDList ):
""" Get the status information about jobs in the given list
"""
self.createClient( )
# Check if the client is ready
if not self.BOINCClient:
self.log.error( 'Soap client is not ready.' )
return S_ERROR( 'Soap client is not ready.' )
wsdl_jobIDList = self.BOINCClient.factory.create( 'stringArray' )
    for job in jobIDList:
try:
job = job.split("@")[0]
except:
self.log.debug("The job id is %s" % job)
pass
wsdl_jobIDList[0].append( job )
try:
result = self.BOINCClient.service.getJobStatus( wsdl_jobIDList )
except:
      self.log.error( 'Could not get the status of the jobs in the list from the BOINC CE', self.wsdl )
      return S_ERROR( 'Could not get the status of the jobs in the list from the BOINC CE %s, communication failed!' % self.wsdl )
    if not result['ok']:
      self.log.warn( 'Did not get the status of the jobs in the list from the BOINC CE %s, the value returned is false!' % self.wsdl )
      return S_ERROR( 'Did not get the status of the jobs in the list from the BOINC CE %s, the value returned is false!' % self.wsdl )
    self.log.debug( 'Got the status of the jobs in the list from the BOINC CE %s.' % self.wsdl )
resultRe = { }
for jobStatus in result['values'][0]:
(jobID, status) = jobStatus.split(":")
jobID = "%s@%s" % ( jobID, self.suffix)
resultRe[jobID] = status
return S_OK( resultRe )
#############################################################################
def getJobOutput( self, jobID, localDir = None ):
""" Get the stdout and stderr outputs of the specified job . If the localDir is provided,
the outputs are stored as files in this directory and the name of the files are returned.
Otherwise, the outputs are returned as strings.
"""
self.createClient( )
# Check if the client is ready
if not self.BOINCClient:
self.log.error( 'Soap client is not ready.' )
return S_ERROR( 'Soap client is not ready.' )
try:
tempID = jobID.split("@")[0]
except:
tempID = jobID
try:
result = self.BOINCClient.service.getJobOutput( tempID )
except:
      self.log.error( 'Could not get the outputs of job from the BOINC CE',
                      'Job %s, BOINC CE %s' % (jobID, self.wsdl) )
      return S_ERROR( 'Could not get the outputs of job %s from the BOINC CE %s, communication failed!' % (jobID, self.wsdl) )
    if not result['ok']:
      self.log.warn( 'Did not get the outputs of job %s from the BOINC CE %s, the value returned is false!' % (jobID, self.wsdl) )
      return S_ERROR( 'Did not get the outputs of job %s from the BOINC CE %s, the value returned is false!' % (jobID, self.wsdl) )
    self.log.debug( 'Got the outputs of job %s from the BOINC CE %s.' % (jobID, self.wsdl) )
strOutfile = base64.decodestring( result['values'][0][0] )
strErrorfile = base64.decodestring( result['values'][0][1] )
if localDir:
outFile = os.path.join( localDir, 'BOINC_%s.out' % jobID )
self._fromStrToFile( strOutfile, outFile )
errorFile = os.path.join( localDir, 'BOINC_%s.err' % jobID )
self._fromStrToFile( strErrorfile, errorFile )
return S_OK( ( outFile, errorFile ) )
else:
# Return the outputs as a string
return S_OK( ( strOutfile, strErrorfile ) )
##############################################################################
def _fromFileToStr(self, fileName ):
""" Read a file and return the file content as a string
"""
    strFile = ''
    fileHandler = None
    if os.path.exists( fileName ):
      try:
        fileHandler = open( fileName, "r" )
        strFile = fileHandler.read( )
      except:
        self.log.verbose( "Failed to read file %s!" % fileName )
      finally:
        if fileHandler:
          fileHandler.close( )
return strFile
#####################################################################
def _fromStrToFile(self, strContent, fileName ):
""" Write a string to a file
"""
    fileHandler = None
    try:
      fileHandler = open( fileName, "w" )
      fileHandler.write( strContent )
    except:
      self.log.verbose( "Failed to create %s!" % fileName )
    finally:
      if fileHandler:
        fileHandler.close( )
# testing this
if __name__ == "__main__":
test_boinc = BOINCComputingElement( 12 )
test_submit = 1
test_getStatus = 2
test_getDynamic = 4
test_getOutput = 8
test_parameter = 4
jobID = 'zShvbK_0@mardirac3.in2p3.fr'
if test_parameter & test_submit:
fd, fname = tempfile.mkstemp( suffix = '_pilotwrapper.py', prefix = 'DIRAC_', dir = "/home/client/dirac/data/" )
os.close( fd )
fd = open ( fname,"w" )
    fd.write('#!/usr/bin/env sh\necho \"I am standard out\" >&1 \necho \"I am standard error\" >&2 ')
fd.close()
result = test_boinc.submitJob( fname )
if not result['OK']:
print result['Message']
else:
jobID = result['Value'][0]
print "Successfully submit a job %s" % jobID
if test_parameter & test_getStatus:
jobTestList = ["Uu0ghO_0@mardirac3.in2p3.fr", "1aDmIf_0@mardirac3.in2p3.fr", jobID]
jobStatus = test_boinc.getJobStatus( jobTestList )
if not jobStatus['OK']:
print jobStatus['Message']
else:
for id_ in jobTestList:
print 'The status of the job %s is %s' % (id_, jobStatus['Value'][id_])
if test_parameter & test_getDynamic:
serverState = test_boinc.getCEStatus()
if not serverState['OK']:
print serverState['Message']
else:
print 'The number of jobs waiting is %s' % serverState['WaitingJobs']
print 'The number of jobs running is %s' % serverState['RunningJobs']
if test_parameter & test_getOutput:
outstate = test_boinc.getJobOutput( jobID, "/tmp/" )
if not outstate['OK']:
print outstate['Message']
else:
print "Please check the directory /tmp for the output and error files of job %s" % jobID
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| Sbalbp/DIRAC | Resources/Computing/BOINCComputingElement.py | Python | gpl-3.0 | 13,448 | ["DIRAC"] | 0081fab1e16ad446d1eee0af26de5a0443a0380502a46dda965559ad07380226 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import numbers
import itertools
import collections
import numpy as np
from zarr.errors import (err_too_many_indices, err_boundscheck, err_negative_step,
err_vindex_invalid_selection)
def is_integer(x):
return isinstance(x, numbers.Integral)
def is_integer_array(x, ndim=None):
t = hasattr(x, 'shape') and hasattr(x, 'dtype') and x.dtype.kind in 'ui'
if ndim is not None:
t = t and len(x.shape) == ndim
return t
def is_bool_array(x, ndim=None):
t = hasattr(x, 'shape') and hasattr(x, 'dtype') and x.dtype == bool
if ndim is not None:
t = t and len(x.shape) == ndim
return t
def is_scalar(value, dtype):
if np.isscalar(value):
return True
if isinstance(value, tuple) and dtype.names and len(value) == len(dtype.names):
return True
return False
def normalize_integer_selection(dim_sel, dim_len):
# normalize type to int
dim_sel = int(dim_sel)
# handle wraparound
if dim_sel < 0:
dim_sel = dim_len + dim_sel
# handle out of bounds
if dim_sel >= dim_len or dim_sel < 0:
err_boundscheck(dim_len)
return dim_sel
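# Example (sketch): normalize_integer_selection(-1, 10) -> 9, while
# normalize_integer_selection(10, 10) triggers err_boundscheck.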
ChunkDimProjection = collections.namedtuple(
'ChunkDimProjection',
('dim_chunk_ix', 'dim_chunk_sel', 'dim_out_sel')
)
"""A mapping from chunk to output array for a single dimension.
Parameters
----------
dim_chunk_ix
Index of chunk.
dim_chunk_sel
Selection of items from chunk array.
dim_out_sel
Selection of items in target (output) array.
"""
class IntDimIndexer(object):
def __init__(self, dim_sel, dim_len, dim_chunk_len):
# normalize
dim_sel = normalize_integer_selection(dim_sel, dim_len)
# store attributes
self.dim_sel = dim_sel
self.dim_len = dim_len
self.dim_chunk_len = dim_chunk_len
self.nitems = 1
def __iter__(self):
dim_chunk_ix = self.dim_sel // self.dim_chunk_len
dim_offset = dim_chunk_ix * self.dim_chunk_len
dim_chunk_sel = self.dim_sel - dim_offset
dim_out_sel = None
yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel)
def ceildiv(a, b):
return int(np.ceil(a / b))
class SliceDimIndexer(object):
def __init__(self, dim_sel, dim_len, dim_chunk_len):
# normalize
self.start, self.stop, self.step = dim_sel.indices(dim_len)
if self.step < 1:
err_negative_step()
# store attributes
self.dim_len = dim_len
self.dim_chunk_len = dim_chunk_len
self.nitems = max(0, ceildiv((self.stop - self.start), self.step))
self.nchunks = ceildiv(self.dim_len, self.dim_chunk_len)
def __iter__(self):
# figure out the range of chunks we need to visit
dim_chunk_ix_from = self.start // self.dim_chunk_len
dim_chunk_ix_to = ceildiv(self.stop, self.dim_chunk_len)
# iterate over chunks in range
for dim_chunk_ix in range(dim_chunk_ix_from, dim_chunk_ix_to):
# compute offsets for chunk within overall array
dim_offset = dim_chunk_ix * self.dim_chunk_len
dim_limit = min(self.dim_len, (dim_chunk_ix + 1) * self.dim_chunk_len)
# determine chunk length, accounting for trailing chunk
dim_chunk_len = dim_limit - dim_offset
if self.start < dim_offset:
# selection starts before current chunk
dim_chunk_sel_start = 0
remainder = (dim_offset - self.start) % self.step
if remainder:
dim_chunk_sel_start += self.step - remainder
# compute number of previous items, provides offset into output array
dim_out_offset = ceildiv((dim_offset - self.start), self.step)
else:
# selection starts within current chunk
dim_chunk_sel_start = self.start - dim_offset
dim_out_offset = 0
if self.stop > dim_limit:
# selection ends after current chunk
dim_chunk_sel_stop = dim_chunk_len
else:
# selection ends within current chunk
dim_chunk_sel_stop = self.stop - dim_offset
dim_chunk_sel = slice(dim_chunk_sel_start, dim_chunk_sel_stop, self.step)
dim_chunk_nitems = ceildiv((dim_chunk_sel_stop - dim_chunk_sel_start),
self.step)
dim_out_sel = slice(dim_out_offset, dim_out_offset + dim_chunk_nitems)
yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel)
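# A worked example of the chunk arithmetic above: with dim_len=10,
# dim_chunk_len=5 and dim_sel=slice(2, 9, 3) (items 2, 5 and 8),
# SliceDimIndexer yields chunk 0 with dim_chunk_sel=slice(2, 5, 3),
# dim_out_sel=slice(0, 1), then chunk 1 with dim_chunk_sel=slice(0, 4, 3),
# dim_out_sel=slice(1, 3).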
def check_selection_length(selection, shape):
if len(selection) > len(shape):
err_too_many_indices(selection, shape)
def replace_ellipsis(selection, shape):
selection = ensure_tuple(selection)
# count number of ellipsis present
n_ellipsis = sum(1 for i in selection if i is Ellipsis)
if n_ellipsis > 1:
# more than 1 is an error
raise IndexError("an index can only have a single ellipsis ('...')")
elif n_ellipsis == 1:
# locate the ellipsis, count how many items to left and right
n_items_l = selection.index(Ellipsis) # items to left of ellipsis
n_items_r = len(selection) - (n_items_l + 1) # items to right of ellipsis
n_items = len(selection) - 1 # all non-ellipsis items
if n_items >= len(shape):
# ellipsis does nothing, just remove it
selection = tuple(i for i in selection if i != Ellipsis)
else:
# replace ellipsis with as many slices are needed for number of dims
new_item = selection[:n_items_l] + ((slice(None),) * (len(shape) - n_items))
if n_items_r:
new_item += selection[-n_items_r:]
selection = new_item
# fill out selection if not completely specified
if len(selection) < len(shape):
selection += (slice(None),) * (len(shape) - len(selection))
# check selection not too long
check_selection_length(selection, shape)
return selection
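# Example (sketch): replace_ellipsis((0, Ellipsis), (4, 5, 6)) returns
# (0, slice(None, None, None), slice(None, None, None)).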
def replace_lists(selection):
return tuple(
np.asarray(dim_sel) if isinstance(dim_sel, list) else dim_sel
for dim_sel in selection
)
def ensure_tuple(v):
if not isinstance(v, tuple):
v = (v,)
return v
ChunkProjection = collections.namedtuple(
'ChunkProjection',
('chunk_coords', 'chunk_selection', 'out_selection')
)
"""A mapping of items from chunk to output array. Can be used to extract items from the
chunk array for loading into an output array. Can also be used to extract items from a
value array for setting/updating in a chunk array.
Parameters
----------
chunk_coords
Indices of chunk.
chunk_selection
Selection of items from chunk array.
out_selection
Selection of items in target (output) array.
"""
def is_slice(s):
return isinstance(s, slice)
def is_contiguous_slice(s):
return is_slice(s) and (s.step is None or s.step == 1)
def is_positive_slice(s):
return is_slice(s) and (s.step is None or s.step >= 1)
def is_contiguous_selection(selection):
selection = ensure_tuple(selection)
return all([
(is_integer_array(s) or is_contiguous_slice(s) or s == Ellipsis)
for s in selection
])
def is_basic_selection(selection):
selection = ensure_tuple(selection)
return all([is_integer(s) or is_positive_slice(s) for s in selection])
# noinspection PyProtectedMember
class BasicIndexer(object):
def __init__(self, selection, array):
# handle ellipsis
selection = replace_ellipsis(selection, array._shape)
# setup per-dimension indexers
dim_indexers = []
for dim_sel, dim_len, dim_chunk_len in \
zip(selection, array._shape, array._chunks):
if is_integer(dim_sel):
dim_indexer = IntDimIndexer(dim_sel, dim_len, dim_chunk_len)
elif is_slice(dim_sel):
dim_indexer = SliceDimIndexer(dim_sel, dim_len, dim_chunk_len)
else:
raise IndexError('unsupported selection item for basic indexing; '
'expected integer or slice, got {!r}'
.format(type(dim_sel)))
dim_indexers.append(dim_indexer)
self.dim_indexers = dim_indexers
self.shape = tuple(s.nitems for s in self.dim_indexers
if not isinstance(s, IntDimIndexer))
self.drop_axes = None
def __iter__(self):
for dim_projections in itertools.product(*self.dim_indexers):
chunk_coords = tuple(p.dim_chunk_ix for p in dim_projections)
chunk_selection = tuple(p.dim_chunk_sel for p in dim_projections)
out_selection = tuple(p.dim_out_sel for p in dim_projections
if p.dim_out_sel is not None)
yield ChunkProjection(chunk_coords, chunk_selection, out_selection)
class BoolArrayDimIndexer(object):
def __init__(self, dim_sel, dim_len, dim_chunk_len):
# check number of dimensions
if not is_bool_array(dim_sel, 1):
raise IndexError('Boolean arrays in an orthogonal selection must '
'be 1-dimensional only')
# check shape
if dim_sel.shape[0] != dim_len:
raise IndexError('Boolean array has the wrong length for dimension; '
'expected {}, got {}'.format(dim_len, dim_sel.shape[0]))
# store attributes
self.dim_sel = dim_sel
self.dim_len = dim_len
self.dim_chunk_len = dim_chunk_len
self.nchunks = ceildiv(self.dim_len, self.dim_chunk_len)
# precompute number of selected items for each chunk
self.chunk_nitems = np.zeros(self.nchunks, dtype='i8')
for dim_chunk_ix in range(self.nchunks):
dim_offset = dim_chunk_ix * self.dim_chunk_len
self.chunk_nitems[dim_chunk_ix] = np.count_nonzero(
self.dim_sel[dim_offset:dim_offset + self.dim_chunk_len]
)
self.chunk_nitems_cumsum = np.cumsum(self.chunk_nitems)
self.nitems = self.chunk_nitems_cumsum[-1]
self.dim_chunk_ixs = np.nonzero(self.chunk_nitems)[0]
def __iter__(self):
# iterate over chunks with at least one item
for dim_chunk_ix in self.dim_chunk_ixs:
# find region in chunk
dim_offset = dim_chunk_ix * self.dim_chunk_len
dim_chunk_sel = self.dim_sel[dim_offset:dim_offset + self.dim_chunk_len]
# pad out if final chunk
if dim_chunk_sel.shape[0] < self.dim_chunk_len:
tmp = np.zeros(self.dim_chunk_len, dtype=bool)
tmp[:dim_chunk_sel.shape[0]] = dim_chunk_sel
dim_chunk_sel = tmp
# find region in output
if dim_chunk_ix == 0:
start = 0
else:
start = self.chunk_nitems_cumsum[dim_chunk_ix - 1]
stop = self.chunk_nitems_cumsum[dim_chunk_ix]
dim_out_sel = slice(start, stop)
yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel)
class Order:
UNKNOWN = 0
INCREASING = 1
DECREASING = 2
UNORDERED = 3
@staticmethod
def check(a):
diff = np.diff(a)
diff_positive = diff >= 0
n_diff_positive = np.count_nonzero(diff_positive)
all_increasing = n_diff_positive == len(diff_positive)
any_increasing = n_diff_positive > 0
if all_increasing:
order = Order.INCREASING
elif any_increasing:
order = Order.UNORDERED
else:
order = Order.DECREASING
return order
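# Example (sketch): Order.check(np.array([1, 3, 5])) -> Order.INCREASING,
# Order.check(np.array([5, 3, 1])) -> Order.DECREASING and
# Order.check(np.array([3, 1, 5])) -> Order.UNORDERED.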
def wraparound_indices(x, dim_len):
loc_neg = x < 0
if np.any(loc_neg):
x[loc_neg] = x[loc_neg] + dim_len
def boundscheck_indices(x, dim_len):
if np.any(x < 0) or np.any(x >= dim_len):
err_boundscheck(dim_len)
class IntArrayDimIndexer(object):
"""Integer array selection against a single dimension."""
def __init__(self, dim_sel, dim_len, dim_chunk_len, wraparound=True, boundscheck=True,
order=Order.UNKNOWN):
# ensure 1d array
dim_sel = np.asanyarray(dim_sel)
if not is_integer_array(dim_sel, 1):
raise IndexError('integer arrays in an orthogonal selection must be '
'1-dimensional only')
# handle wraparound
if wraparound:
wraparound_indices(dim_sel, dim_len)
# handle out of bounds
if boundscheck:
boundscheck_indices(dim_sel, dim_len)
# store attributes
self.dim_len = dim_len
self.dim_chunk_len = dim_chunk_len
self.nchunks = ceildiv(self.dim_len, self.dim_chunk_len)
self.nitems = len(dim_sel)
# determine which chunk is needed for each selection item
# note: for dense integer selections, the division operation here is the
# bottleneck
dim_sel_chunk = dim_sel // dim_chunk_len
# determine order of indices
if order == Order.UNKNOWN:
order = Order.check(dim_sel)
self.order = order
if self.order == Order.INCREASING:
self.dim_sel = dim_sel
self.dim_out_sel = None
elif self.order == Order.DECREASING:
self.dim_sel = dim_sel[::-1]
# TODO should be possible to do this without creating an arange
self.dim_out_sel = np.arange(self.nitems - 1, -1, -1)
else:
# sort indices to group by chunk
self.dim_out_sel = np.argsort(dim_sel_chunk)
self.dim_sel = np.take(dim_sel, self.dim_out_sel)
# precompute number of selected items for each chunk
self.chunk_nitems = np.bincount(dim_sel_chunk, minlength=self.nchunks)
# find chunks that we need to visit
self.dim_chunk_ixs = np.nonzero(self.chunk_nitems)[0]
# compute offsets into the output array
self.chunk_nitems_cumsum = np.cumsum(self.chunk_nitems)
def __iter__(self):
for dim_chunk_ix in self.dim_chunk_ixs:
# find region in output
if dim_chunk_ix == 0:
start = 0
else:
start = self.chunk_nitems_cumsum[dim_chunk_ix - 1]
stop = self.chunk_nitems_cumsum[dim_chunk_ix]
if self.order == Order.INCREASING:
dim_out_sel = slice(start, stop)
else:
dim_out_sel = self.dim_out_sel[start:stop]
# find region in chunk
dim_offset = dim_chunk_ix * self.dim_chunk_len
dim_chunk_sel = self.dim_sel[start:stop] - dim_offset
yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel)
def slice_to_range(s, l):
return range(*s.indices(l))
def ix_(selection, shape):
"""Convert an orthogonal selection to a numpy advanced (fancy) selection, like numpy.ix_
but with support for slices and single ints."""
# normalisation
selection = replace_ellipsis(selection, shape)
# replace slice and int as these are not supported by numpy.ix_
selection = [slice_to_range(dim_sel, dim_len) if isinstance(dim_sel, slice)
else [dim_sel] if is_integer(dim_sel)
else dim_sel
for dim_sel, dim_len in zip(selection, shape)]
# now get numpy to convert to a coordinate selection
selection = np.ix_(*selection)
return selection
def oindex(a, selection):
"""Implementation of orthogonal indexing with slices and ints."""
selection = replace_ellipsis(selection, a.shape)
drop_axes = tuple([i for i, s in enumerate(selection) if is_integer(s)])
selection = ix_(selection, a.shape)
result = a[selection]
if drop_axes:
result = result.squeeze(axis=drop_axes)
return result
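# Usage sketch with a plain numpy array (zarr arrays route through
# OrthogonalIndexer instead):
#   a = np.arange(12).reshape(3, 4)
#   oindex(a, ([0, 2], slice(1, 3)))  # rows 0 and 2, columns 1-2, shape (2, 2)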
def oindex_set(a, selection, value):
selection = replace_ellipsis(selection, a.shape)
drop_axes = tuple([i for i, s in enumerate(selection) if is_integer(s)])
selection = ix_(selection, a.shape)
if not np.isscalar(value) and drop_axes:
value = np.asanyarray(value)
value_selection = [slice(None)] * len(a.shape)
for i in drop_axes:
value_selection[i] = np.newaxis
value_selection = tuple(value_selection)
value = value[value_selection]
a[selection] = value
# noinspection PyProtectedMember
class OrthogonalIndexer(object):
def __init__(self, selection, array):
# handle ellipsis
selection = replace_ellipsis(selection, array._shape)
# normalize list to array
selection = replace_lists(selection)
# setup per-dimension indexers
dim_indexers = []
for dim_sel, dim_len, dim_chunk_len in \
zip(selection, array._shape, array._chunks):
if is_integer(dim_sel):
dim_indexer = IntDimIndexer(dim_sel, dim_len, dim_chunk_len)
elif isinstance(dim_sel, slice):
dim_indexer = SliceDimIndexer(dim_sel, dim_len, dim_chunk_len)
elif is_integer_array(dim_sel):
dim_indexer = IntArrayDimIndexer(dim_sel, dim_len, dim_chunk_len)
elif is_bool_array(dim_sel):
dim_indexer = BoolArrayDimIndexer(dim_sel, dim_len, dim_chunk_len)
else:
raise IndexError('unsupported selection item for orthogonal indexing; '
'expected integer, slice, integer array or Boolean '
'array, got {!r}'
.format(type(dim_sel)))
dim_indexers.append(dim_indexer)
self.array = array
self.dim_indexers = dim_indexers
self.shape = tuple(s.nitems for s in self.dim_indexers
if not isinstance(s, IntDimIndexer))
self.is_advanced = not is_basic_selection(selection)
if self.is_advanced:
self.drop_axes = tuple([i for i, dim_indexer in enumerate(self.dim_indexers)
if isinstance(dim_indexer, IntDimIndexer)])
else:
self.drop_axes = None
def __iter__(self):
for dim_projections in itertools.product(*self.dim_indexers):
chunk_coords = tuple(p.dim_chunk_ix for p in dim_projections)
chunk_selection = tuple(p.dim_chunk_sel for p in dim_projections)
out_selection = tuple(p.dim_out_sel for p in dim_projections
if p.dim_out_sel is not None)
# handle advanced indexing arrays orthogonally
if self.is_advanced:
# N.B., numpy doesn't support orthogonal indexing directly as yet,
# so need to work around via np.ix_. Also np.ix_ does not support a
# mixture of arrays and slices or integers, so need to convert slices
# and integers into ranges.
chunk_selection = ix_(chunk_selection, self.array._chunks)
# special case for non-monotonic indices
if not is_basic_selection(out_selection):
out_selection = ix_(out_selection, self.shape)
yield ChunkProjection(chunk_coords, chunk_selection, out_selection)
class OIndex(object):
def __init__(self, array):
self.array = array
def __getitem__(self, selection):
fields, selection = pop_fields(selection)
selection = ensure_tuple(selection)
selection = replace_lists(selection)
return self.array.get_orthogonal_selection(selection, fields=fields)
def __setitem__(self, selection, value):
fields, selection = pop_fields(selection)
selection = ensure_tuple(selection)
selection = replace_lists(selection)
return self.array.set_orthogonal_selection(selection, value, fields=fields)
# noinspection PyProtectedMember
def is_coordinate_selection(selection, array):
return (
(len(selection) == len(array._shape)) and
all([is_integer(dim_sel) or is_integer_array(dim_sel)
for dim_sel in selection])
)
# noinspection PyProtectedMember
def is_mask_selection(selection, array):
return (
len(selection) == 1 and
is_bool_array(selection[0]) and
selection[0].shape == array._shape
)
# noinspection PyProtectedMember
class CoordinateIndexer(object):
def __init__(self, selection, array):
# some initial normalization
selection = ensure_tuple(selection)
selection = tuple([i] if is_integer(i) else i for i in selection)
selection = replace_lists(selection)
# validation
if not is_coordinate_selection(selection, array):
raise IndexError('invalid coordinate selection; expected one integer '
'(coordinate) array per dimension of the target array, '
'got {!r}'.format(selection))
# handle wraparound, boundscheck
for dim_sel, dim_len in zip(selection, array.shape):
# handle wraparound
wraparound_indices(dim_sel, dim_len)
# handle out of bounds
boundscheck_indices(dim_sel, dim_len)
# compute chunk index for each point in the selection
chunks_multi_index = tuple(
dim_sel // dim_chunk_len
for (dim_sel, dim_chunk_len) in zip(selection, array._chunks)
)
# broadcast selection - this will raise error if array dimensions don't match
selection = np.broadcast_arrays(*selection)
chunks_multi_index = np.broadcast_arrays(*chunks_multi_index)
# remember shape of selection, because we will flatten indices for processing
self.sel_shape = selection[0].shape if selection[0].shape else (1,)
# flatten selection
selection = [dim_sel.reshape(-1) for dim_sel in selection]
chunks_multi_index = [dim_chunks.reshape(-1) for dim_chunks in chunks_multi_index]
# ravel chunk indices
chunks_raveled_indices = np.ravel_multi_index(chunks_multi_index,
dims=array._cdata_shape)
# group points by chunk
if np.any(np.diff(chunks_raveled_indices) < 0):
# optimisation, only sort if needed
sel_sort = np.argsort(chunks_raveled_indices)
selection = tuple(dim_sel[sel_sort] for dim_sel in selection)
else:
sel_sort = None
# store attributes
self.selection = selection
self.sel_sort = sel_sort
self.shape = selection[0].shape if selection[0].shape else (1,)
self.drop_axes = None
self.array = array
# precompute number of selected items for each chunk
self.chunk_nitems = np.bincount(chunks_raveled_indices, minlength=array.nchunks)
self.chunk_nitems_cumsum = np.cumsum(self.chunk_nitems)
# locate the chunks we need to process
self.chunk_rixs = np.nonzero(self.chunk_nitems)[0]
# unravel chunk indices
self.chunk_mixs = np.unravel_index(self.chunk_rixs, dims=array._cdata_shape)
def __iter__(self):
# iterate over chunks
for i, chunk_rix in enumerate(self.chunk_rixs):
chunk_coords = tuple(m[i] for m in self.chunk_mixs)
if chunk_rix == 0:
start = 0
else:
start = self.chunk_nitems_cumsum[chunk_rix - 1]
stop = self.chunk_nitems_cumsum[chunk_rix]
if self.sel_sort is None:
out_selection = slice(start, stop)
else:
out_selection = self.sel_sort[start:stop]
chunk_offsets = tuple(
dim_chunk_ix * dim_chunk_len
for dim_chunk_ix, dim_chunk_len in zip(chunk_coords, self.array._chunks)
)
chunk_selection = tuple(
dim_sel[start:stop] - dim_chunk_offset
for (dim_sel, dim_chunk_offset) in zip(self.selection, chunk_offsets)
)
yield ChunkProjection(chunk_coords, chunk_selection, out_selection)
# noinspection PyProtectedMember
class MaskIndexer(CoordinateIndexer):
def __init__(self, selection, array):
# some initial normalization
selection = ensure_tuple(selection)
selection = replace_lists(selection)
# validation
if not is_mask_selection(selection, array):
            raise IndexError('invalid mask selection; expected one Boolean (mask) '
                             'array with the same shape as the target array, got {!r}'
                             .format(selection))
# convert to indices
selection = np.nonzero(selection[0])
# delegate the rest to superclass
super(MaskIndexer, self).__init__(selection, array)
class VIndex(object):
def __init__(self, array):
self.array = array
def __getitem__(self, selection):
fields, selection = pop_fields(selection)
selection = ensure_tuple(selection)
selection = replace_lists(selection)
if is_coordinate_selection(selection, self.array):
return self.array.get_coordinate_selection(selection, fields=fields)
elif is_mask_selection(selection, self.array):
return self.array.get_mask_selection(selection, fields=fields)
else:
err_vindex_invalid_selection(selection)
def __setitem__(self, selection, value):
fields, selection = pop_fields(selection)
selection = ensure_tuple(selection)
selection = replace_lists(selection)
if is_coordinate_selection(selection, self.array):
self.array.set_coordinate_selection(selection, value, fields=fields)
elif is_mask_selection(selection, self.array):
self.array.set_mask_selection(selection, value, fields=fields)
else:
err_vindex_invalid_selection(selection)
def check_fields(fields, dtype):
# early out
if fields is None:
return dtype
# check type
if not isinstance(fields, (str, list, tuple)):
raise IndexError("'fields' argument must be a string or list of strings; found "
"{!r}".format(type(fields)))
if fields:
if dtype.names is None:
raise IndexError("invalid 'fields' argument, array does not have any fields")
try:
if isinstance(fields, str):
# single field selection
out_dtype = dtype[fields]
else:
# multiple field selection
out_dtype = np.dtype([(f, dtype[f]) for f in fields])
except KeyError as e:
raise IndexError("invalid 'fields' argument, field not found: {!r}".format(e))
else:
return out_dtype
else:
return dtype
def check_no_multi_fields(fields):
if isinstance(fields, list):
if len(fields) == 1:
return fields[0]
elif len(fields) > 1:
raise IndexError('multiple fields are not supported for this operation')
return fields
def pop_fields(selection):
if isinstance(selection, str):
# single field selection
fields = selection
selection = ()
elif not isinstance(selection, tuple):
# single selection item, no fields
fields = None
# leave selection as-is
else:
# multiple items, split fields from selection items
fields = [f for f in selection if isinstance(f, str)]
fields = fields[0] if len(fields) == 1 else fields
selection = tuple(s for s in selection if not isinstance(s, str))
selection = selection[0] if len(selection) == 1 else selection
return fields, selection
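# Example (sketch): pop_fields(('foo', slice(0, 5))) -> ('foo', slice(0, 5)),
# splitting the field name off from the positional selection.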
| alimanfoo/zarr | zarr/indexing.py | Python | mit | 28,056 | ["VisIt"] | 2eec5366063448353e764a56f44576d6233d79097013a9ebaab5cc5502ef3a85 |
"""
The Euclid Visible Instrument Image Simulator
=============================================
This file contains an image simulator for the Euclid VISible instrument.
The approximate sequence of events in the simulator is as follows:
#. Read in a configuration file, which defines for example,
detector characteristics (bias, dark and readout noise, gain,
plate scale and pixel scale, oversampling factor, exposure time etc.).
#. Read in another file containing charge trap definitions (for CTI modelling).
#. Read in a file defining the cosmic rays (trail lengths and cumulative distributions).
#. Read in CCD offset information, displace the image, and modify
the output file name to contain the CCD and quadrant information
(note that VIS has a focal plane of 6 x 6 detectors).
#. Read in a source list and determine the number of different object types.
#. Read in a file which assigns data to a given object index.
#. Load the PSF model (a 2D map with a given over sampling or field dependent maps).
#. Generate a finemap (oversampled image) for each object type. If an object
is a 2D image then calculate the shape tensor to be used for size scaling.
Each type of an object is then placed onto its own finely sampled finemap.
#. Loop over the number of exposures to co-add and for each object in the object catalog:
    * determine the number of electrons an object should have by scaling the object's magnitude
      with the given zeropoint and exposure time (see the sketch after this sequence).
* determine whether the object lands on to the detector or not and if it is
a star or an extended source (i.e. a galaxy).
* if object is extended determine the size (using a size-magnitude relation) and scale counts,
convolve with the PSF, and finally overlay onto the detector according to its position.
* if object is a star, scale counts according to the derived
scaling (first step), and finally overlay onto the detector according to its position.
#. Apply calibration unit flux to mimic flat field exposures [optional].
#. Apply a multiplicative flat-field map to emulate pixel-to-pixel non-uniformity [optional].
#. Add a charge injection line (horizontal and/or vertical) [optional].
#. Add cosmic ray tracks onto the CCD with random positions but known distribution [optional].
#. Apply detector charge bleeding in column direction [optional].
#. Add constant dark current and background light from Zodiacal light [optional].
#. Include spatially uniform scattered light to the pixel grid [optional].
#. Add photon (Poisson) noise [optional].
#. Add cosmetic defects from an input file [optional].
#. Add pre- and overscan regions in the serial direction [optional].
#. Apply the CDM03 radiation damage model [optional].
#. Apply CCD273 non-linearity model to the pixel data [optional].
#. Add readout noise selected from a Gaussian distribution [optional].
#. Convert from electrons to ADUs using a given gain factor.
#. Add a given bias level and discretise the counts (the output is going to be in 16bit unsigned integers).
#. Finally the simulated image is converted to a FITS file, a WCS is assigned
and the output is saved to the current working directory.
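The magnitude-to-electrons scaling mentioned in the object loop above presumably
follows the standard zeropoint relation (a sketch only; the exact form used by
the code may differ)::
    electrons = exposure_time * 10 ** (-0.4 * (magnitude - zeropoint))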
.. Warning:: The code is still work in progress and new features are being added.
The code has been tested, but nevertheless bugs may be lurking in corners, so
please report any weird or inconsistent simulations to the author.
Dependencies
------------
This script depends on the following packages:
:requires: PyFITS (tested with 3.0.6)
:requires: NumPy (tested with 1.6.1)
:requires: numexpr (tested with 2.0.1)
:requires: SciPy (tested with 0.10.1)
:requires: vissim-python package
.. Note:: This class is not Python 3 compatible. For example, xrange does not exist
in Python 3 (but is used here for speed and memory consumption improvements).
In addition, at least the string formatting should be changed if moved to
Python 3.x.
Testing
-------
Before trying to run the code, please make sure that you have compiled the
cdm03.f90 Fortran code using f2py (f2py -c -m cdm03 cdm03.f90). For testing,
please run the SCIENCE section from the test.config as follows::
python simulator.py -c data/test.config -s TESTSCIENCE1X
This will produce an image representing the VIS lower left (0th) quadrant. Because
noise and cosmic rays are randomised, one cannot directly compare the science
outputs; instead we must rely on outputs that are free from random effects.
In the data subdirectory there is a file called "nonoisenocrQ0_00_00testscience.fits",
which is the comparison image without any noise or cosmic rays. To test the functionality,
please divide your no-noise, no-cosmic-ray output image by the one in the data
folder. This should yield a uniform unity image, or at least very close to one, given some
numerical rounding uncertainties, especially in the FFT convolution (which is float32, not
float64).
Benchmarking
------------
Minimal benchmarking has been performed using the TESTSCIENCE1X section of the test.config input file::
Galaxy: 26753/26753 intscale=177.489159281 size=0.0353116000387
6798 objects were place on the detector
real 4m14.008s
user 3m59.609s
sys 0m4.728s
These numbers were obtained on my laptop (2.2 GHz Intel Core i7) with a
64-bit Python 2.7.2 installation. Further speed testing can be performed using the cProfile module
as follows::
python -m cProfile -o vissim.profile simulator.py -c data/test.config -s TESTSCIENCE3X
and then analysing the results with e.g. RunSnakeRun.
.. Note:: The result above was obtained with a nominally sampled PSF; however, that is only good for
testing purposes. If instead one uses say three times over sampled PSF (TESTSCIENCE3x) then the
execution time rises significantly (to about 22 minutes). This is mostly due to the fact that convolution
becomes rather expensive when done in the finely sampled PSF domain.
Change Log
----------
:version: 1.3cosmos (a special version to generate a VIS COSMOS field)
Version and change logs::
0.1: pre-development backbone.
0.4: first version with most pieces together.
0.5: this version has all the basic features present, but not fully tested.
0.6: implemented pre/overscan, fixed a bug where an object close to the upper right corner of an
image was not overlaid correctly. Included multiplicative flat fielding effect (pixel non-uniformity).
0.7: implemented bleeding.
0.8: cleaned up the code and improved documentation. Fixed a bug related to checking if object falls on the CCD.
Improved the information that is being written to the FITS header.
0.9: fixed a problem with the CTI model swapping Q1 with Q2. Fixed a bug that caused the pre- and overscan to
be identical for each quadrant even though Q1 and Q3 need the regions to be mirrored.
1.0: First release. The code can now take an over sampled PSF and use that for convolutions. Implemented a WCS
to the header.
1.05: included an option to add flux from the calibration unit to allow flat field exposures to be generated.
Now scaled the number of cosmic rays with the exposure time so that 10s flats have an appropriate number
of cosmic ray tracks.
1.06: changed how stars are laid down on the CCD. Now the PSF is interpolated to a new coordinate grid in the
oversampled frame after which it is downsampled to the CCD grid. This should increase the centroiding
accuracy.
1.07: included an option to apply non-linearity model. Cleaned the documentation.
1.08: optimised some of the operations with numexpr (only a minor improvement).
1.1: Fixed a bug related to adding the system readout noise. In previous versions the readout noise was
being underestimated due to the fact that it was included as a variance not standard deviation.
1.2: Included a spatially uniform scattered light. Changed how the image pixel values are rounded before
deriving the Poisson noise.
Future Work
-----------
.. todo::
#. test that the cosmic rays are correctly implemented (looks like there are too many long trails and too few short ones)
#. check that the size distribution of galaxies is suitable (now the scaling is before convolution!)
#. objects.dat is now hard-coded into the code; this should be read from the config file
#. implement spatially variable PSF
#. implement CCD offsets (for focal plane simulations)
#. test that the WCS is correctly implemented and allows CCD offsets
#. implement a Gaussian random draw for the size-magnitude distribution rather than a straight fit
#. centering of an object depends on the centering of the postage stamp (should recalculate the centroid)
#. charge injection line positions are now hardcoded in the code; they should be read from the config file
#. include rotation in metrology
#. implement optional dithered offsets
#. try to further improve the convolution speed (look into fftw package)
Contact Information
-------------------
:author: Sami-Matias Niemi
:contact: s.niemi@ucl.ac.uk
"""
import os, sys, datetime, math, pprint
import ConfigParser
from optparse import OptionParser
import scipy
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy import ndimage
from scipy import signal
import pyfits as pf
import numpy as np
import numexpr as ne
from CTI import CTI
from support import logger as lg
from support import VISinstrumentModel
__author__ = 'Sami-Matias Niemi'
__version__ = '1.3cosmos'
class VISsimulator():
"""
Euclid Visible Instrument Image Simulator
    The image that is being built is in::
self.image
:param opts: OptionParser instance
:type opts: OptionParser instance
"""
def __init__(self, opts):
"""
Class Constructor.
:param opts: OptionParser instance
:type opts: OptionParser instance
"""
self.configfile = opts.configfile
self.section = opts.section
self.debug = opts.debug
#load instrument model
self.information = VISinstrumentModel.VISinformation()
#update settings with defaults
self.information.update(dict(quadrant=int(opts.quadrant),
ccdx=int(opts.xCCD),
ccdy=int(opts.yCCD),
psfoversampling=3.0,
xsize=2048,
ysize=2066,
prescanx=50,
ovrscanx=20,
fullwellcapacity=200000,
dark=0.001,
readout=4.5,
bias=1000.0,
cosmic_bkgd=0.182758225257,
scattered_light=2.96e-2,
e_adu=3.1,
magzero=15182880871.225231,
exposures=1,
exptime=565.0,
ra=123.0,
dec=45.0,
flatflux='data/VIScalibrationUnitflux.fits',
cosmicraylengths='data/cdf_cr_length.dat',
cosmicraydistance='data/cdf_cr_total.dat',
flatfieldfile='data/VISFlatField2percent.fits',
trapfile='data/cdm_euclid.dat'))
#setup logger
self.log = lg.setUpLogger('VISsim.log')
def readConfigs(self):
"""
        Reads the config file information using ConfigParser.
"""
self.config = ConfigParser.RawConfigParser()
self.config.readfp(open(self.configfile))
def processConfigs(self):
"""
        Processes configuration information and saves the information to a dictionary self.information.
The configuration file may look as follows::
[TEST]
quadrant = 0
CCDx = 0
CCDy = 0
xsize = 2048
ysize = 2066
prescanx = 50
ovrscanx = 20
fullwellcapacity = 200000
dark = 0.001
readout = 4.5
bias = 1000.0
cosmic_bkgd = 0.182
e_ADU = 3.1
injection = 150000.0
magzero = 1.7059e10
exposures = 1
exptime = 565.0
RA = 145.95
DEC = -38.16
sourcelist = data/source_test.dat
PSFfile = data/interpolated_psf.fits
trapfile = data/cdm_euclid.dat
cosmeticsFile = data/cosmetics.dat
flatfieldfile = data/VISFlatField2percent.fits
output = test.fits
addSources = yes
noise = yes
cosmetics = no
chargeInjectionx = no
chargeInjectiony = no
radiationDamage = yes
cosmicRays = yes
overscans = yes
bleeding = yes
flatfieldM = yes
For explanation of each field, see /data/test.config.
"""
#parse options and update the information dictionary
options = self.config.options(self.section)
settings = {}
for option in options:
try:
settings[option] = self.config.getint(self.section, option)
except ValueError:
try:
settings[option] = self.config.getfloat(self.section, option)
except ValueError:
settings[option] = self.config.get(self.section, option)
self.information.update(settings)
#force gain to be float
self.information['e_adu'] = float(self.information['e_adu'])
#name of the output file, include quadrants and CCDs
self.information['output'] = 'Q%i_0%i_0%i%s' % (self.information['quadrant'],
self.information['ccdx'],
self.information['ccdy'],
self.config.get(self.section, 'output'))
#booleans to control the flow
self.chargeInjectionx = self.config.getboolean(self.section, 'chargeInjectionx')
self.chargeInjectiony = self.config.getboolean(self.section, 'chargeInjectiony')
self.cosmicRays = self.config.getboolean(self.section, 'cosmicRays')
self.noise = self.config.getboolean(self.section, 'noise')
self.cosmetics = self.config.getboolean(self.section, 'cosmetics')
self.radiationDamage = self.config.getboolean(self.section, 'radiationDamage')
self.addsources = self.config.getboolean(self.section, 'addSources')
self.bleeding = self.config.getboolean(self.section, 'bleeding')
self.overscans = self.config.getboolean(self.section, 'overscans')
#these don't need to be in the config file
        try:
            self.lampFlux = self.config.getboolean(self.section, 'lampFlux')
        except ConfigParser.NoOptionError:
            self.lampFlux = False
        try:
            self.nonlinearity = self.config.getboolean(self.section, 'nonlinearity')
        except ConfigParser.NoOptionError:
            self.nonlinearity = False
        try:
            self.flatfieldM = self.config.getboolean(self.section, 'flatfieldM')
        except ConfigParser.NoOptionError:
            self.flatfieldM = False
        try:
            self.scatteredlight = self.config.getboolean(self.section, 'scatteredLight')
        except ConfigParser.NoOptionError:
            self.scatteredlight = True
        try:
            self.readoutNoise = self.config.getboolean(self.section, 'readoutNoise')
        except ConfigParser.NoOptionError:
            self.readoutNoise = True
self.information['variablePSF'] = False
self.booleans = dict(nonlinearity=self.nonlinearity,
flatfieldM=self.flatfieldM,
lampFlux=self.lampFlux,
chargeInjectionx=self.chargeInjectionx,
chargeInjectiony=self.chargeInjectiony,
cosmicRays=self.cosmicRays,
noise=self.noise,
cosmetics=self.cosmetics,
radiationDamage=self.radiationDamage,
addsources=self.addsources,
bleeding=self.bleeding,
overscans=self.overscans)
if self.debug:
pprint.pprint(self.information)
self.log.info('Using the following input values:')
for key, value in self.information.iteritems():
self.log.info('%s = %s' % (key, value))
self.log.info('Using the following booleans:')
for key, value in self.booleans.iteritems():
self.log.info('%s = %s' % (key, value))
def _createEmpty(self):
"""
        Creates an empty array of a given x and y size, full of zeros.
"""
self.image = np.zeros((self.information['ysize'], self.information['xsize']), dtype=np.float64)
def cosmicRayIntercepts(self, lum, x0, y0, l, phi):
"""
Derive cosmic ray streak intercept points.
:param lum: luminosities of the cosmic ray tracks
:param x0: central positions of the cosmic ray tracks in x-direction
:param y0: central positions of the cosmic ray tracks in y-direction
:param l: lengths of the cosmic ray tracks
:param phi: orientation angles of the cosmic ray tracks
:return: map
:rtype: nd-array
"""
#create empty array
crImage = np.zeros((self.information['ysize'], self.information['xsize']), dtype=np.float64)
        #this is a very slow way to do this
for cosmics in xrange(0, len(l)):
#delta x and y
dx = l[cosmics] * np.cos(phi[cosmics])
dy = l[cosmics] * np.sin(phi[cosmics])
#pixels in x-direction
ilo = np.floor(x0[cosmics] - l[cosmics])
if ilo < 1.:
ilo = 1
ihi = 1 + np.floor(x0[cosmics] + l[cosmics])
if ihi > self.information['xsize']:
ihi = self.information['xsize']
#pixels in y-directions
jlo = np.floor(y0[cosmics] - l[cosmics])
if jlo < 1.:
jlo = 1
jhi = 1 + np.floor(y0[cosmics] + l[cosmics])
if jhi > self.information['ysize']:
jhi = self.information['ysize']
u = []
x = []
y = []
n = 0 # count the intercepts
#Compute X intercepts on the pixel grid
if dx > 0.:
for j in xrange(int(ilo), int(ihi)):
ok = (j - x0[cosmics]) / dx
if np.abs(ok) <= 0.5:
n += 1
u.append(ok)
x.append(j)
y.append(y0[cosmics] + ok * dy)
#Compute Y intercepts on the pixel grid
if dy > 0.:
for j in xrange(int(jlo), int(jhi)):
ok = (j - y0[cosmics]) / dy
if np.abs(ok) <= 0.5:
n += 1
u.append(ok)
x.append(x0[cosmics] + ok * dx)
y.append(j)
#check if no intercepts were found
if n < 1:
i = np.floor(x0[cosmics])
j = np.floor(y0[cosmics])
crImage[j, i] += lum[cosmics]
#Find the arguments that sort the intersections along the track.
u = np.asarray(u)
x = np.asarray(x)
y = np.asarray(y)
args = np.argsort(u)
u = u[args]
x = x[args]
y = y[args]
#Decide which cell each interval traverses, and the path length.
for i in xrange(1, n - 1):
w = u[i + 1] - u[i]
                cx = int(1 + np.floor((x[i + 1] + x[i]) / 2.0))
                cy = int(1 + np.floor((y[i + 1] + y[i]) / 2.0))
                if 0 <= cx < self.information['xsize'] and 0 <= cy < self.information['ysize']:
crImage[cy, cx] += (w * lum[cosmics])
return crImage
def readCosmicRayInformation(self):
"""
Reads in the cosmic ray track information from two input files.
Stores the information to a dictionary called cr.
"""
self.log.info('Reading in cosmic ray information from %s and %s' % (self.information['cosmicraylengths'],
self.information['cosmicraydistance']))
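        #both input files are presumably two-column ASCII tables: column 0 holds
        #the sampled variable (track length or total counts, respectively) and
        #column 1 the corresponding cumulative distribution function value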
crLengths = np.loadtxt(self.information['cosmicraylengths'])
crDists = np.loadtxt(self.information['cosmicraydistance'])
self.cr = dict(cr_u=crLengths[:, 0], cr_cdf=crLengths[:, 1], cr_cdfn=np.shape(crLengths)[0],
cr_v=crDists[:, 0], cr_cde=crDists[:, 1], cr_cden=np.shape(crDists)[0])
def _writeFITSfile(self, image, filename):
"""
:param image: image array to save
:type image: ndarray
:param filename: name of the output file, e.g. file.fits
:type filename: str
:return: None
"""
if os.path.isfile(filename):
os.remove(filename)
#create a new FITS file, using HDUList instance
ofd = pf.HDUList(pf.PrimaryHDU())
#new image HDU
hdu = pf.ImageHDU(data=image)
#update and verify the header
hdu.header.add_history('Created by VISsim at %s' % datetime.datetime.isoformat(datetime.datetime.now()))
hdu.verify('fix')
ofd.append(hdu)
#write the actual file
ofd.writeto(filename)
def objectOnDetector(self, object):
"""
Tests if the object falls on the detector.
:param object: object to be placed to the self.image.
:return: whether the object falls on the detector or not
:rtype: boolean
"""
ny, nx = 10, 10 #TODO
mx = self.information['xsize']
my = self.information['ysize']
xt = object[0]
yt = object[1]
if object[3] > 0:
#galaxy
#fac = (0.2**((object[2] - 22.)/7.)) / self.shapey[object[3]] / 2.
fac = (0.2**((object[2] - 22.)/7.)) / 10. / 2.
else:
#star
fac = 1.0
#Assess the boundary box of the input image.
xlo = (1 - nx) * 0.5 * fac + xt
xhi = (nx - 1) * 0.5 * fac + xt
ylo = (1 - ny) * 0.5 * fac + yt
yhi = (ny - 1) * 0.5 * fac + yt
i1 = np.floor(xlo + 0.5)
i2 = np.floor(xhi + 0.5) + 1
j1 = np.floor(ylo + 0.5)
j2 = np.floor(yhi + 0.5) + 1
if i2 < 1 or i1 > mx:
return False
if j2 < 1 or j1 > my:
return False
return True
def overlayToCCD(self, data, obj):
"""
Overlay data from a source object onto the self.image.
:param data: ndarray of data to be overlaid on to self.image
:type data: ndarray
:param obj: object information such as x,y position
:type obj: list
"""
#object centre x and y coordinates (only in full pixels, fractional has been taken into account already)
xt = np.floor(obj[0])
yt = np.floor(obj[1])
#input array size
nx = data.shape[1]
ny = data.shape[0]
# Assess the boundary box of the input image
xlo = (1 - nx) * 0.5 + xt
xhi = (nx - 1) * 0.5 + xt + 1
ylo = (1 - ny) * 0.5 + yt
yhi = (ny - 1) * 0.5 + yt + 1
i1 = int(np.floor(xlo + 0.5))
if i1 < 1:
i1 = 0
i2 = int(np.floor(xhi + 0.5))
if i2 > self.information['xsize']:
i2 = self.information['xsize']
j1 = int(np.floor(ylo + 0.5))
if j1 < 1:
j1 = 0
j2 = int(np.floor(yhi + 0.5))
if j2 > self.information['ysize']:
j2 = self.information['ysize']
if i1 > i2 or j1 > j2:
self.log.info('Object does not fall on the detector...')
return
ni = i2 - i1
nj = j2 - j1
self.log.info('Adding an object to (x,y)=({0:.4f}, {1:.4f})'.format(xt, yt))
self.log.info('Bounding box = [%i, %i : %i, %i]' % (i1, i2, j1, j2))
#add to the image
if ni == nx and nj == ny:
#full frame will fit
self.image[j1:j2, i1:i2] += data
elif ni < nx and nj == ny:
#x dimensions shorter
if int(np.floor(xlo + 0.5)) < 1:
#small values, left side
self.image[j1:j2, i1:i2] += data[:, nx-ni:]
else:
#large values, right side
self.image[j1:j2, i1:i2] += data[:, :ni]
elif nj < ny and ni == nx:
#y dimensions shorter
if int(np.floor(ylo + 0.5)) < 1:
#small values, bottom
self.image[j1:j2, i1:i2] += data[ny-nj:, :]
else:
#large values, top
self.image[j1:j2, i1:i2] += data[:nj, :]
else:
#both lengths smaller, can be in any of the four corners
            if int(np.floor(xlo + 0.5)) < 1 and int(np.floor(ylo + 0.5)) < 1:
#left lower
self.image[j1:j2, i1:i2] += data[ny-nj:, nx-ni:]
elif int(np.floor(xlo + 0.5)) < 1 and int(np.floor(yhi + 0.5)) > self.information['ysize']:
#left upper
self.image[j1:j2, i1:i2] += data[:nj, nx-ni:]
elif int(np.floor(xhi + 0.5)) > self.information['xsize'] and int(np.floor(ylo + 0.5)) < 1:
#right lower
self.image[j1:j2, i1:i2] += data[ny-nj:, :ni]
else:
#right upper
self.image[j1:j2, i1:i2] += data[:nj, :ni]
def writeFITSfile(self, data, filename, unsigned16bit=False):
"""
Writes out a simple FITS file.
:param data: data to be written
:type data: ndarray
:param filename: name of the output file
:type filename: str
:param unsigned16bit: whether to scale the data using bzero=32768
:type unsigned16bit: bool
:return: None
"""
if os.path.isfile(filename):
os.remove(filename)
#create a new FITS file, using HDUList instance
ofd = pf.HDUList(pf.PrimaryHDU())
#new image HDU
hdu = pf.ImageHDU(data=data)
#convert to unsigned 16bit int if requested
if unsigned16bit:
hdu.scale('int16', '', bzero=32768)
hdu.header.add_history('Scaled to unsigned 16bit integer!')
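            #FITS has no native unsigned 16bit integer type; scaling to int16 with
            #BZERO=32768 offsets the stored values so that compliant readers
            #interpret the data as 0..65535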
#add input keywords to the header
for key, value in self.information.iteritems():
#truncate long keys
if len(key) > 8:
key = key[:7]
try:
hdu.header.update(key.upper(), value)
except:
try:
hdu.header.update(key.upper(), str(value))
except:
pass
#write booleans
for key, value in self.booleans.iteritems():
#truncate long keys
if len(key) > 8:
key = key[:7]
hdu.header.update(key.upper(), str(value), 'Boolean Flags')
#update and verify the header
        hdu.header.add_history('This is an intermediate data product not the final output!')
hdu.header.add_history('Created by VISsim (version=%s) at %s' % (__version__, datetime.datetime.isoformat(datetime.datetime.now())))
hdu.verify('fix')
ofd.append(hdu)
#write the actual file
ofd.writeto(filename)
def configure(self):
"""
        Configures the simulator with input information and creates an empty array
        on which the final image will be built.
"""
self.readConfigs()
self.processConfigs()
self._createEmpty()
        self.log.info('Read in the configuration file and created an empty array')
def readObjectlist(self):
"""
Reads object list using numpy.loadtxt, determines the number of object types,
and finds the file that corresponds to a given object type.
The input catalog is assumed to contain the following columns:
#. x coordinate
#. y coordinate
#. apparent magnitude of the object
#. type of the object [0=star, number=type defined in the objects.dat]
#. rotation [0 for stars, [0, 360] for galaxies]
This method also displaces the object coordinates based on the quadrant and the
CCD to be simulated and masks all objects outside the quadrant FoV.
"""
self.objects = np.loadtxt(self.information['sourcelist'])
self.originalx = self.objects[:, 0].copy()
self.originaly = self.objects[:, 1].copy()
strg = '{0:d} sources read from {1:s}'.format(np.shape(self.objects)[0], self.information['sourcelist'])
self.log.info(strg)
#the input sextractor catalog has been derived from oversampled ACS data
self.objects[:, 0] /= self.information['psfoversampling']
self.objects[:, 1] /= self.information['psfoversampling']
#change the image coordinates based on the CCD being simulated
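        #the shifts below are in pixels; presumably 4196 = 2 x (2048 + 50) spans two
        #quadrants including prescans and 4132 = 2 x 2066 spans two quadrants in y,
        #while the 1.643 mm and 8.116 mm CCD gaps are converted to pixels assuming
        #a 12 micron pixel pitch (hence the x 1000 / 12 factors)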
if self.information['ccdx'] > 0:
#x coordinate shift
self.objects[:, 0] -= (self.information['ccdx'] * (4196. + (1.643 * 1000 / 12.)))
if self.information['ccdy'] > 0:
#y coordinate shift
self.objects[:, 1] -= (self.information['ccdy'] * (4132. + (8.116 * 1000 / 12.)))
#and quadrant
if self.information['quadrant'] > 0:
if self.information['quadrant'] > 1:
#change y coordinate value
self.log.info('Changing y coordinates to take into account quadrant')
self.objects[:, 1] -= self.information['ysize']
if self.information['quadrant'] % 2 != 0:
self.log.info('Changing x coordinates to take into account quadrant')
self.objects[:, 0] -= self.information['xsize']
def readPSFs(self):
"""
Reads in a PSF from a FITS file.
.. Note:: at the moment this method supports only a single PSF file.
"""
if self.information['variablePSF']:
#grid of PSFs
self.log.debug('Spatially variable PSF:')
self.log.error('NOT IMPLEMENTED!')
sys.exit(-9)
else:
#single PSF
self.log.debug('Spatially static PSF:')
self.log.info('Opening PSF file %s' % self.information['psffile'])
self.PSF = pf.getdata(self.information['psffile']).astype(np.float64)
self.PSF /= np.sum(self.PSF)
self.PSFx = self.PSF.shape[1]
self.PSFy = self.PSF.shape[0]
self.log.info('PSF sampling (x,y) = (%i, %i) ' % (self.PSFx, self.PSFy))
def addObjects(self):
"""
Add objects from the object list to the CCD image (self.image).
Scale the object's brightness in electrons and size using the input catalog magnitude.
The size scaling is a crude fit to Massey et al. plot.
        .. Note:: scipy.signal.fftconvolve seems to be significantly faster than scipy.signal.convolve2d.
"""
#total number of objects in the input catalogue and counter for visible objects
n_objects = self.objects.shape[0]
visible = 0
self.log.info('Number of CCD transits = %i' % self.information['exposures'])
self.log.info('Total number of objects in the input catalog = %i' % n_objects)
#calculate the scaling factors from the magnitudes
intscales = 10.0**(-0.4 * self.objects[:, 2]) * \
self.information['magzero'] * \
self.information['exptime']
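        #e.g. with the defaults above (magzero ~ 1.52e10, exptime = 565s) an object
        #of magnitude 22 scales to roughly 10**(-0.4*22) * 1.52e10 * 565 ~ 1.4e4
        #electrons in total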
#TODO remove hardcoded file name
fh = pf.open('hlsp_candels_hst_acs_cos-tot_f814w_v1.0_drz.fits', mmap=True) #memmap
#loop over the number of objects
for j, obj in enumerate(self.objects):
stype = obj[3]
if self.objectOnDetector(obj):
visible += 1
if stype == 0:
#point source, apply PSF
txt = "Star: " + str(j+1) + "/" + str(n_objects) + " intscale=" + str(intscales[j])
print txt
self.log.info(txt)
#data, simply copy PSF
data = self.PSF.copy()
#map the data to new grid aligned with the centre of the object and scale
yind, xind = np.indices(data.shape)
yi = yind.astype(np.float) + (obj[0] % 1)
xi = xind.astype(np.float) + (obj[1] % 1)
data = ndimage.map_coordinates(data, [yi, xi], order=1, mode='nearest')
if self.information['psfoversampling'] != 1.0:
data = scipy.ndimage.zoom(data, 1./self.information['psfoversampling'], order=1)
                    #suppress negative numbers, renormalise and scale with the intscale
                    data[data < 0.0] = 0.0
                    total = np.sum(data)
                    sca = intscales[j] / total
                    data = ne.evaluate("data * sca")
self.log.info('Maximum value of the data added is %.2f electrons' % np.max(data))
#overlay the scaled PSF on the image
self.overlayToCCD(data, obj)
else:
#extended source, load data from correct x and y position
#cutout a postage stamp from the COSMOS data
sz = 0.2**((obj[2] - 22.)/7.) * 110
xmin = int(max(self.originalx[j]-sz, 0))
ymin = int(max(self.originaly[j]-sz, 0))
xmax = int(min(self.originalx[j]+sz, fh[0].data.shape[1]))
ymax = int(min(self.originaly[j]+sz, fh[0].data.shape[0]))
data = fh[0].data[ymin:ymax, xmin:xmax].copy().astype(np.float64)
#renormalize the flux, try to cope with background
data[data < 1e-5] = 0.0
data /= data.sum()
txt = "Galaxy: " +str(j+1) + "/" + str(n_objects) + " magnitude=" + str(obj[2]) + \
" intscale=" + str(intscales[j]) + \
" original position=" + str(self.originalx[j]) + ', ' + str(self.originaly[j]) + \
" postage stamp size=" + str(sz)
print txt
self.log.info(txt)
#convolve with the PSF
conv = signal.fftconvolve(data, self.PSF, mode='full')
del data
#scale the galaxy image size with the inverse of the PSF over sampling factor
if self.information['psfoversampling'] != 1.0:
conv = scipy.ndimage.zoom(conv, 1./self.information['psfoversampling'], order=1)
#suppress negative numbers
conv[conv < 0.0] = 0.0
                    #renormalise and scale to the right magnitude
                    total = np.sum(conv)
                    sca = intscales[j] / total
                    conv = ne.evaluate("conv * sca")
#tiny galaxies sometimes end up with completely zero array
#checking this costs time, so perhaps this could be removed
if np.isnan(np.sum(conv)):
print 'ERROR -- small galaxy, no pixels to include'
continue
if self.debug:
scipy.misc.imsave('image%i.jpg' % (j+1), conv/np.max(conv)*255)
self.writeFITSfile(conv, 'afterconv%i.fits' % (j+1))
self.log.info('Maximum value of the data added is %.3f electrons' % np.max(conv))
#overlay the convolved image on the image
self.overlayToCCD(conv, obj)
        self.log.info('%i objects were placed on the detector' % visible)
        print '%i objects were placed on the detector' % visible
def addLampFlux(self):
"""
Include flux from the calibration source.
"""
self.image += pf.getdata(self.information['flatflux'])
self.log.info('Flux from the calibration unit included (%s)' % self.information['flatflux'])
def applyFlatfield(self):
"""
Applies multiplicative flat field to emulate pixel-to-pixel non-uniformity.
        Because the pixel-to-pixel non-uniformity (i.e. the multiplicative flat field) is applied
        before CTI and other effects, the flat field file must be the same size as the pixels that see
        the sky. Thus, in the case of a single quadrant, (x, y) = (2048, 2066).
"""
flat = pf.getdata(self.information['flatfieldfile'])
self.image *= flat
self.log.info('Applied multiplicative flat (pixel-to-pixel non-uniformity) from %s...' %
self.information['flatfieldfile'])
def addChargeInjection(self):
"""
Add either horizontal or vertical charge injection line to the image.
"""
if self.chargeInjectionx:
            #self.image[self.information['ysize']/2-10:self.information['ysize']/2, :] = self.information['injection']
self.image[1500:1511, :] = self.information['injection']
            self.log.info('Adding horizontal charge injection line')
if self.chargeInjectiony:
#self.image[:, self.information['xsize']/2-10:self.information['xsize']/2] = self.information['injection']
self.image[:, 1500:1511] = self.information['injection']
#self.image[:, 1950:1961] = self.information['injection']
            self.log.info('Adding vertical charge injection line')
def addCosmicRays(self):
"""
Add cosmic rays to the arrays based on a power-law intensity distribution for tracks.
        Cosmic ray properties (such as location and angle) are drawn from a uniform random distribution.
"""
self.readCosmicRayInformation()
#estimate the number of cosmics
cr_n = self.information['xsize'] * self.information['ysize'] * 0.014 / 43.263316 * 2.
#scale with exposure time, the above numbers are for the nominal 565s exposure
cr_n *= (self.information['exptime'] / 565.0)
#assume a power-law intensity distribution for tracks
fit = dict(cr_lo=1.0e3, cr_hi=1.0e5, cr_q=2.0e0)
fit['q1'] = 1.0e0 - fit['cr_q']
fit['en1'] = fit['cr_lo'] ** fit['q1']
fit['en2'] = fit['cr_hi'] ** fit['q1']
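        #inverse-CDF sampling of the power law: for p(E) ~ E**(-cr_q) on
        #[cr_lo, cr_hi], a uniform deviate u maps to
        #E = (en1 + (en2 - en1) * u)**(1 / q1) with q1 = 1 - cr_q,
        #en1 = cr_lo**q1 and en2 = cr_hi**q1 (used in the fallback branch below)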
#choose the length of the tracks
#pseudo-random number taken from a uniform distribution between 0 and 1
luck = np.random.rand(int(np.floor(cr_n)))
if self.cr['cr_cdfn'] > 1:
ius = InterpolatedUnivariateSpline(self.cr['cr_cdf'], self.cr['cr_u'])
self.cr['cr_l'] = ius(luck)
else:
self.cr['cr_l'] = np.sqrt(1.0 - luck ** 2) / luck
if self.cr['cr_cden'] > 1:
ius = InterpolatedUnivariateSpline(self.cr['cr_cde'], self.cr['cr_v'])
self.cr['cr_e'] = ius(luck)
else:
self.cr['cr_e'] = (fit['en1'] + (fit['en2'] - fit['en1']) *
np.random.rand(int(np.floor(cr_n)))) ** (1.0 / fit['q1'])
#Choose the properties such as positions and an angle from a random Uniform dist
cr_x = self.information['xsize'] * np.random.rand(int(np.floor(cr_n)))
cr_y = self.information['ysize'] * np.random.rand(int(np.floor(cr_n)))
cr_phi = np.pi * np.random.rand(int(np.floor(cr_n)))
#find the intercepts
CCD_cr = self.cosmicRayIntercepts(self.cr['cr_e'], cr_x, cr_y, self.cr['cr_l'], cr_phi)
        #save image without cosmic rays
if self.nonlinearity:
tmp = VISinstrumentModel.CCDnonLinearityModel(self.image.copy())
self.writeFITSfile(tmp, 'nonoisenocr' + self.information['output'])
else:
self.writeFITSfile(self.image, 'nonoisenocr' + self.information['output'])
#image without cosmic rays
self.imagenoCR = self.image.copy()
#paste the information
self.image += CCD_cr
#save cosmic ray image map
self.cosmicMap = CCD_cr
#count the covering factor
area_cr = np.count_nonzero(self.cosmicMap)
self.log.info('The cosmic ray covering factor is %i pixels ' % area_cr)
#output information to a FITS file
self.writeFITSfile(self.cosmicMap, 'cosmicraymap' + self.information['output'])
def applyDarkCurrentAndCosmicBackground(self):
"""
Apply dark current and the cosmic background.
Scales dark and background with the exposure time.
Additionally saves the image without noise to a FITS file.
"""
#save no noise image
self.writeFITSfile(self.image, 'nonoise' + self.information['output'])
#add dark and background
noise = self.information['exptime'] * (self.information['dark'] + self.information['cosmic_bkgd'])
self.image += noise
self.log.info('Added dark noise and cosmic background = %f' % noise)
if self.cosmicRays:
self.imagenoCR += noise
def applyScatteredLight(self):
"""
Adds spatially uniform scattered light to the image.
"""
sl = self.information['exptime'] * self.information['scattered_light']
self.image += sl
self.log.info('Added scattered light = %f' % sl)
def applyPoissonNoise(self):
"""
Add Poisson noise to the image.
"""
rounded = np.rint(self.image)
residual = self.image.copy() - rounded #ugly workaround for multiple rounding operations...
rounded[rounded < 0.0] = 0.0
self.image = np.random.poisson(rounded).astype(np.float64)
self.log.info('Added Poisson noise')
self.image += residual
if self.cosmicRays:
#self.imagenoCR[ self.imagenoCR < 0.0] = 0.0
self.imagenoCR = np.random.poisson(np.rint(self.imagenoCR)).astype(np.float64)
    def applyCosmetics(self):
        """
        Apply cosmetic defects described in the input file.
        The input file is assumed to contain comma-separated lines of the form
        (id, x, y, value); np.atleast_2d copes with a file that contains
        exactly one line.
        """
        cosmetics = np.atleast_2d(np.loadtxt(self.information['cosmeticsFile'], delimiter=','))
        self.log.info('Adding cosmetic defects from %s:' % self.information['cosmeticsFile'])
        for line in cosmetics:
            x = int(np.floor(line[1]))
            y = int(np.floor(line[2]))
            value = line[3]
            self.image[y, x] = value
            self.log.info('x=%i, y=%i, value=%f' % (x, y, value))
def applyRadiationDamage(self):
"""
        Applies the CDM03 radiation damage model to the image being constructed.
        .. seealso:: Class :class:`CDM03`
"""
#save image without CTI
self.noCTI = self.image.copy()
self.writeFITSfile(self.noCTI, 'noctinonoise' + self.information['output'])
self.log.debug('Starting to apply radiation damage model...')
#at this point we can give fake data...
cti = CTI.CDM03(dict(trapfile=(self.information['trapfile'])), [-1,], log=self.log)
#here we need the right input data
self.image = cti.applyRadiationDamage(self.image, iquadrant=self.information['quadrant'])
self.log.info('Radiation damage added.')
if self.cosmicRays:
self.log.info('Adding radiation damage to the no cosmic rays image...')
self.imagenoCR = cti.applyRadiationDamage(self.imagenoCR,
iquadrant=self.information['quadrant'])
def applyNonlinearity(self):
"""
Applies a CCD273 non-linearity model to the image being constructed.
"""
#save fully linear image
self.writeFITSfile(self.image, 'nononlinearity' + self.information['output'])
self.log.debug('Starting to apply non-linearity model...')
self.image = VISinstrumentModel.CCDnonLinearityModel(self.image.copy())
self.log.info('Non-linearity effects included.')
if self.cosmicRays:
self.imagenoCR = VISinstrumentModel.CCDnonLinearityModel(self.imagenoCR.copy())
def applyReadoutNoise(self):
"""
Applies readout noise to the image being constructed.
The noise is drawn from a Normal (Gaussian) distribution with average=0.0 and std=readout noise.
"""
noise = np.random.normal(loc=0.0, scale=self.information['readout'], size=self.image.shape)
self.log.info('Sum of readnoise = %f' % np.sum(noise))
#save the readout noise image
self.writeFITSfile(noise, 'readoutnoise' + self.information['output'])
#add to the image
self.image += noise
if self.radiationDamage:
self.noCTI += noise
self.noCTI /= self.information['e_adu']
if self.cosmicRays:
self.imagenoCR += noise
def electrons2ADU(self):
"""
Convert from electrons to ADUs using the value read from the configuration file.
"""
if self.debug:
#save the image without converting to integers
self.writeFITSfile(self.image, 'floatsNoGain' + self.information['output'])
self.image /= self.information['e_adu']
self.log.info('Converting from electrons to ADUs using a factor of %f' % self.information['e_adu'])
if self.cosmicRays:
self.imagenoCR /= self.information['e_adu']
def applyBias(self):
"""
Adds a bias level to the image being constructed.
        The value of the bias is read from the configuration file and stored
        in the information dictionary (key bias).
"""
self.image += self.information['bias']
        self.log.info('Bias of %i counts was added to the image' % self.information['bias'])
if self.cosmicRays:
self.imagenoCR += self.information['bias']
def addPreOverScans(self):
"""
        Add pre- and overscan regions to the self.image. These areas are added only in the serial direction.
        Because the 1st and 3rd quadrants are read out in a different serial direction than the nominal
        orientation, in these images the regions are mirrored.
        The sizes of the prescan and overscan regions are defined by the prescanx and ovrscanx keywords, respectively.
"""
self.log.info('Adding pre- and overscan regions')
canvas = np.zeros((self.information['ysize'],
(self.information['xsize'] + self.information['prescanx'] + self.information['ovrscanx'])))
#because the pre- and overscans are in x-direction this needs to be taken into account for the
# 1st and 3rd quadrant
if self.information['quadrant'] in (0, 2):
canvas[:, self.information['prescanx']: self.information['prescanx']+self.information['xsize']] = self.image
self.image = canvas
elif self.information['quadrant'] in (1, 3):
canvas[:, self.information['ovrscanx']: self.information['ovrscanx']+self.information['xsize']] = self.image
self.image = canvas
else:
self.log.error('Cannot include pre- and overscan because of an unknown quadrant!')
if self.cosmicRays:
canvas = np.zeros((self.information['ysize'],
(self.information['xsize'] + self.information['prescanx'] + self.information['ovrscanx'])))
if self.information['quadrant'] in (0, 2):
canvas[:, self.information['prescanx']: self.information['prescanx']+self.information['xsize']] = self.imagenoCR
else:
canvas[:, self.information['ovrscanx']: self.information['ovrscanx']+self.information['xsize']] = self.imagenoCR
self.imagenoCR = canvas
def applyBleeding(self):
"""
Apply bleeding along the CCD columns if the number of electrons in a pixel exceeds the full-well capacity.
Bleeding is modelled in the parallel direction only, because the CCD273s are assumed not to bleed in
serial direction.
:return: None
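        For example, with the default full-well capacity of 200000 electrons, a pixel
        holding 250000 electrons is overloaded by 50000: the first (bottom-to-top) pass
        moves half of the excess (25000 electrons) up the column and the second
        (top-to-bottom) pass moves the remaining excess down, until unsaturated pixels
        have absorbed the charge.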
"""
        self.log.info('Applying column bleeding...')
        #loop over each column, as bleeding is modelled column-wise
        for i, column in enumerate(self.image.T):
            overloaded = 0.
            for j, value in enumerate(column):
                #first pass - from bottom to top (need to halve the bleeding)
                overload = value - self.information['fullwellcapacity']
                if overload > 0.:
                    overload /= 2.
                    self.image[j, i] -= overload
                    overloaded += overload
                elif overloaded > 0.:
                    if -overload > overloaded:
                        overload = -overloaded
                    self.image[j, i] -= overload
                    overloaded += overload
        for i, column in enumerate(self.image.T):
            overloaded = 0.
            for j, value in enumerate(column[::-1]):
                #second pass - from top to bottom (bleeding was already halved, so now full)
                overload = value - self.information['fullwellcapacity']
                if overload > 0.:
                    self.image[-j-1, i] -= overload
                    overloaded += overload
                elif overloaded > 0.:
                    if -overload > overloaded:
                        overload = -overloaded
                    self.image[-j-1, i] -= overload
                    overloaded += overload
def discretise(self, max=2**16-1):
"""
Converts a floating point image array (self.image) to an integer array with max values
defined by the argument max.
        :param max: maximum value the integer array may contain [default 65k]
:type max: float
:return: None
"""
#also write out an image without cosmics if those were added
if self.cosmicRays:
self.imagenoCR = np.rint(self.imagenoCR).astype(np.int)
self.imagenoCR[self.imagenoCR > max] = max
self.writeFITSfile(self.imagenoCR, 'nocr' + self.information['output'], unsigned16bit=True)
if self.debug:
#save the image without converting to integers
self.writeFITSfile(self.image, 'floats' + self.information['output'])
#avoid negative numbers in case bias level was not added
#self.image[self.image < 0.0] = 0.
        #cut off values larger than max
self.image[self.image > max] = max
self.image = np.rint(self.image).astype(np.int)
self.log.info('Maximum and total values of the image are %i and %i, respectively' % (np.max(self.image),
np.sum(self.image)))
if self.radiationDamage:
self.noCTI = np.rint(self.noCTI).astype(np.int)
self.noCTI[self.noCTI > max] = max
self.writeFITSfile(self.noCTI, 'nocti' + self.information['output'], unsigned16bit=True)
def writeOutputs(self):
"""
Writes out a FITS file using PyFITS and converts the image array to 16bit unsigned integer as
appropriate for VIS.
Updates header with the input values and flags used during simulation.
"""
if os.path.isfile(self.information['output']):
os.remove(self.information['output'])
#create a new FITS file, using HDUList instance
ofd = pf.HDUList(pf.PrimaryHDU())
#new image HDU
hdu = pf.ImageHDU(data=self.image)
#convert to unsigned 16bit
hdu.scale('int16', '', bzero=32768)
hdu.header.add_history('Scaled to unsigned 16bit integer!')
#add WCS to the header
hdu.header.update('WCSAXES', 2)
hdu.header.update('CRPIX1', self.image.shape[1]/2.)
hdu.header.update('CRPIX2', self.image.shape[0]/2.)
hdu.header.update('CRVAL1', self.information['ra'])
hdu.header.update('CRVAL2', self.information['dec'])
hdu.header.update('CTYPE1', 'RA---TAN')
hdu.header.update('CTYPE2', 'DEC--TAN')
#north is up, east is left
        hdu.header.update('CD1_1', -0.1 / 3600.) #0.1 arcsec pixel scale expressed in degrees
hdu.header.update('CD1_2', 0.0)
hdu.header.update('CD2_1', 0.0)
hdu.header.update('CD2_2', 0.1 / 3600.)
hdu.header.update('DATE-OBS', datetime.datetime.isoformat(datetime.datetime.now()))
hdu.header.update('INSTRUME', 'VISsim')
#add input keywords to the header
for key, value in self.information.iteritems():
#truncate long keys
if len(key) > 8:
key = key[:7]
try:
hdu.header.update(key.upper(), value)
except:
try:
hdu.header.update(key.upper(), str(value))
except:
pass
hdu.header.update('NRPUFILE', self.information['flatfieldfile'])
#write booleans
for key, value in self.booleans.iteritems():
#truncate long keys
if len(key) > 8:
key = key[:7]
hdu.header.update(key.upper(), str(value), 'Boolean Flags')
hdu.header.add_history('If questions, please contact Sami-Matias Niemi (s.niemi at ucl.ac.uk).')
hdu.header.add_history('Created by VISsim (version=%s) at %s' % (__version__, datetime.datetime.isoformat(datetime.datetime.now())))
hdu.verify('fix')
ofd.append(hdu)
#write the actual file
ofd.writeto(self.information['output'])
def simulate(self):
"""
Create a single simulated image of a quadrant defined by the configuration file.
Will do all steps defined in the config file sequentially.
:return: None
"""
self.configure()
self.readObjectlist()
self.readPSFs()
if self.addsources:
self.addObjects()
if self.lampFlux:
self.addLampFlux()
if self.flatfieldM:
self.applyFlatfield()
if self.chargeInjectionx or self.chargeInjectiony:
self.addChargeInjection()
if self.cosmicRays:
self.addCosmicRays()
if self.bleeding:
self.applyBleeding()
if self.noise:
self.applyDarkCurrentAndCosmicBackground()
if self.scatteredlight:
self.applyScatteredLight()
if self.noise:
self.applyPoissonNoise()
if self.cosmetics:
self.applyCosmetics()
if self.overscans:
self.addPreOverScans()
if self.radiationDamage:
self.applyRadiationDamage()
if self.nonlinearity:
self.applyNonlinearity()
if self.readoutNoise:
self.applyReadoutNoise()
self.electrons2ADU()
if self.information['bias'] <= 0.0:
            self.log.info('Bias level less than or equal to zero, will not add bias!')
else:
self.applyBias()
self.discretise()
self.writeOutputs()
def processArgs(printHelp=False):
"""
Processes command line arguments.
"""
parser = OptionParser()
parser.add_option('-c', '--configfile', dest='configfile',
help="Name of the configuration file", metavar="string")
parser.add_option('-s', '--section', dest='section',
help="Name of the section of the config file [SCIENCE]", metavar="string")
parser.add_option('-q', '--quadrant', dest='quadrant', help='CCD quadrant to simulate [0, 1, 2, 3]',
metavar='int')
parser.add_option('-x', '--xCCD', dest='xCCD', help='CCD number in X-direction within the FPA matrix',
metavar='int')
parser.add_option('-y', '--yCCD', dest='yCCD', help='CCD number in Y-direction within the FPA matrix',
metavar='int')
parser.add_option('-d', '--debug', dest='debug', action='store_true',
help='Debugging mode on')
if printHelp:
parser.print_help()
else:
return parser.parse_args()
if __name__ == '__main__':
opts, args = processArgs()
if opts.configfile is None:
processArgs(True)
sys.exit(1)
simulate = VISsimulator(opts)
simulate.simulate()
| sniemi/EuclidVisibleInstrument | simulator/simulatorCOSMOS.py | Python | bsd-2-clause | 57734 | ["Galaxy", "Gaussian"] | 94565fb3fe769547d267d255dcfeb93a6034b83b32cc322f928a8136b4b7e175 |
# sql/elements.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core SQL expression elements, including :class:`.ClauseElement`,
:class:`.ColumnElement`, and derived classes.
"""
from __future__ import unicode_literals
from .. import util, exc, inspection
from . import type_api
from . import operators
from .visitors import Visitable, cloned_traverse, traverse
from .annotation import Annotated
import itertools
from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG
from .base import _generative
import numbers
import re
import operator
def _clone(element, **kw):
return element._clone()
def collate(expression, collation):
"""Return the clause ``expression COLLATE collation``.
e.g.::
collate(mycolumn, 'utf8_bin')
produces::
mycolumn COLLATE utf8_bin
"""
expr = _literal_as_binds(expression)
return BinaryExpression(
expr,
_literal_as_text(collation),
operators.collate, type_=expr.type)
def between(expr, lower_bound, upper_bound, symmetric=False):
"""Produce a ``BETWEEN`` predicate clause.
E.g.::
from sqlalchemy import between
stmt = select([users_table]).where(between(users_table.c.id, 5, 7))
Would produce SQL resembling::
SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2
The :func:`.between` function is a standalone version of the
:meth:`.ColumnElement.between` method available on all
SQL expressions, as in::
stmt = select([users_table]).where(users_table.c.id.between(5, 7))
All arguments passed to :func:`.between`, including the left side
    column expression, are coerced from Python scalar values if
    the value is not a :class:`.ColumnElement` subclass. For example,
three fixed values can be compared as in::
print(between(5, 3, 7))
Which would produce::
:param_1 BETWEEN :param_2 AND :param_3
:param expr: a column expression, typically a :class:`.ColumnElement`
instance or alternatively a Python scalar expression to be coerced
into a column expression, serving as the left side of the ``BETWEEN``
expression.
:param lower_bound: a column or Python scalar expression serving as the
lower bound of the right side of the ``BETWEEN`` expression.
:param upper_bound: a column or Python scalar expression serving as the
upper bound of the right side of the ``BETWEEN`` expression.
:param symmetric: if True, will render " BETWEEN SYMMETRIC ". Note
that not all databases support this syntax.
.. versionadded:: 0.9.5
.. seealso::
:meth:`.ColumnElement.between`
"""
expr = _literal_as_binds(expr)
return expr.between(lower_bound, upper_bound, symmetric=symmetric)
def literal(value, type_=None):
"""Return a literal clause, bound to a bind parameter.
Literal clauses are created automatically when non-
:class:`.ClauseElement` objects (such as strings, ints, dates, etc.) are
used in a comparison operation with a :class:`.ColumnElement` subclass,
such as a :class:`~sqlalchemy.schema.Column` object. Use this function
to force the generation of a literal clause, which will be created as a
:class:`BindParameter` with a bound value.
:param value: the value to be bound. Can be any Python object supported by
the underlying DB-API, or is translatable via the given type argument.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
will provide bind-parameter translation for this literal.
"""
return BindParameter(None, value, type_=type_, unique=True)
def type_coerce(expression, type_):
"""Associate a SQL expression with a particular type, without rendering
``CAST``.
E.g.::
from sqlalchemy import type_coerce
stmt = select([type_coerce(log_table.date_string, StringDateTime())])
The above construct will produce SQL that is usually otherwise unaffected
by the :func:`.type_coerce` call::
SELECT date_string FROM log
However, when result rows are fetched, the ``StringDateTime`` type
will be applied to result rows on behalf of the ``date_string`` column.
A type that features bound-value handling will also have that behavior
take effect when literal values or :func:`.bindparam` constructs are
passed to :func:`.type_coerce` as targets.
For example, if a type implements the :meth:`.TypeEngine.bind_expression`
method or :meth:`.TypeEngine.bind_processor` method or equivalent,
these functions will take effect at statement compilation/execution time
when a literal value is passed, as in::
# bound-value handling of MyStringType will be applied to the
# literal value "some string"
stmt = select([type_coerce("some string", MyStringType)])
:func:`.type_coerce` is similar to the :func:`.cast` function,
except that it does not render the ``CAST`` expression in the resulting
statement.
:param expression: A SQL expression, such as a :class:`.ColumnElement`
expression or a Python string which will be coerced into a bound literal
value.
:param type_: A :class:`.TypeEngine` class or instance indicating
the type to which the expression is coerced.
.. seealso::
:func:`.cast`
"""
type_ = type_api.to_instance(type_)
if hasattr(expression, '__clause_element__'):
return type_coerce(expression.__clause_element__(), type_)
elif isinstance(expression, BindParameter):
bp = expression._clone()
bp.type = type_
return bp
elif not isinstance(expression, Visitable):
if expression is None:
return Null()
else:
return literal(expression, type_=type_)
else:
return Label(None, expression, type_=type_)
def outparam(key, type_=None):
"""Create an 'OUT' parameter for usage in functions (stored procedures),
for databases which support them.
The ``outparam`` can be used like a regular function parameter.
The "output" value will be available from the
:class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
attribute, which returns a dictionary containing the values.
"""
return BindParameter(
key, None, type_=type_, unique=False, isoutparam=True)
def not_(clause):
"""Return a negation of the given clause, i.e. ``NOT(clause)``.
The ``~`` operator is also overloaded on all
:class:`.ColumnElement` subclasses to produce the
same result.
"""
return operators.inv(_literal_as_binds(clause))
@inspection._self_inspects
class ClauseElement(Visitable):
"""Base class for elements of a programmatically constructed SQL
expression.
"""
__visit_name__ = 'clause'
_annotations = {}
supports_execution = False
_from_objects = []
bind = None
_is_clone_of = None
is_selectable = False
is_clause_element = True
description = None
_order_by_label_element = None
_is_from_container = False
def _clone(self):
"""Create a shallow copy of this ClauseElement.
        This method may be used by a generative API. It's also used as
part of the "deep" copy afforded by a traversal that combines
the _copy_internals() method.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
ClauseElement._cloned_set._reset(c)
ColumnElement.comparator._reset(c)
# this is a marker that helps to "equate" clauses to each other
# when a Select returns its list of FROM clauses. the cloning
# process leaves around a lot of remnants of the previous clause
# typically in the form of column expressions still attached to the
# old table.
c._is_clone_of = self
return c
@property
def _constructor(self):
"""return the 'constructor' for this ClauseElement.
This is for the purposes for creating a new object of
        this type. Usually, it's just the element's __class__.
However, the "Annotated" version of the object overrides
to return the class of its proxied element.
"""
return self.__class__
@util.memoized_property
def _cloned_set(self):
"""Return the set consisting all cloned ancestors of this
ClauseElement.
Includes this ClauseElement. This accessor tends to be used for
FromClause objects to identify 'equivalent' FROM clauses, regardless
of transformative operations.
"""
s = util.column_set()
f = self
while f is not None:
s.add(f)
f = f._is_clone_of
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_is_clone_of', None)
return d
def _annotate(self, values):
"""return a copy of this ClauseElement with annotations
updated by the given dictionary.
"""
return Annotated(self, values)
def _with_annotations(self, values):
"""return a copy of this ClauseElement with annotations
replaced by the given dictionary.
"""
return Annotated(self, values)
def _deannotate(self, values=None, clone=False):
"""return a copy of this :class:`.ClauseElement` with annotations
removed.
:param values: optional tuple of individual values
to remove.
"""
if clone:
# clone is used when we are also copying
# the expression for a deep deannotation
return self._clone()
else:
# if no clone, since we have no annotations we return
# self
return self
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_clauseelement(self, multiparams, params)
def unique_params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Same functionality as ``params()``, except adds `unique=True`
to affected bind parameters so that multiple statements can be
used.
"""
return self._params(True, optionaldict, kwargs)
def params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Returns a copy of this ClauseElement with :func:`bindparam()`
elements replaced with values taken from the given dictionary::
>>> clause = column('x') + bindparam('foo')
>>> print clause.compile().params
{'foo':None}
>>> print clause.params({'foo':7}).compile().params
{'foo':7}
"""
return self._params(False, optionaldict, kwargs)
def _params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument")
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
bind.required = False
if unique:
bind._convert_to_unique()
return cloned_traverse(self, {}, {'bindparam': visit_bindparam})
def compare(self, other, **kw):
"""Compare this ClauseElement to the given ClauseElement.
Subclasses should override the default behavior, which is a
straight identity comparison.
\**kw are arguments consumed by subclass compare() methods and
may be used to modify the criteria for comparison.
(see :class:`.ColumnElement`)
"""
return self is other
def _copy_internals(self, clone=_clone, **kw):
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
The given clone function should be used, which may be applying
additional transformations to the element (i.e. replacement
traversal, cloned traversal, annotations).
"""
pass
def get_children(self, **kwargs):
"""Return immediate child elements of this :class:`.ClauseElement`.
This is used for visit traversal.
\**kwargs may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
return []
def self_group(self, against=None):
"""Apply a 'grouping' to this :class:`.ClauseElement`.
This method is overridden by subclasses to return a
"grouping" construct, i.e. parenthesis. In particular
it's used by "binary" expressions to provide a grouping
around themselves when placed into a larger expression,
as well as by :func:`.select` constructs when placed into
the FROM clause of another :func:`.select`. (Note that
subqueries should be normally created using the
:meth:`.Select.alias` method, as many platforms require
nested SELECT statements to be named).
As expressions are composed together, the application of
:meth:`self_group` is automatic - end-user code should never
need to use this method directly. Note that SQLAlchemy's
clause constructs take operator precedence into account -
so parenthesis might not be needed, for example, in
an expression like ``x OR (y AND z)`` - AND takes precedence
over OR.
The base :meth:`self_group` method of :class:`.ClauseElement`
just returns self.
"""
return self
@util.dependencies("sqlalchemy.engine.default")
def compile(self, default, bind=None, dialect=None, **kw):
"""Compile this SQL expression.
The return value is a :class:`~.Compiled` object.
Calling ``str()`` or ``unicode()`` on the returned value will yield a
string representation of the result. The
:class:`~.Compiled` object also can return a
dictionary of bind parameter names and values
using the ``params`` accessor.
:param bind: An ``Engine`` or ``Connection`` from which a
``Compiled`` will be acquired. This argument takes precedence over
this :class:`.ClauseElement`'s bound engine, if any.
:param column_keys: Used for INSERT and UPDATE statements, a list of
column names which should be present in the VALUES clause of the
compiled statement. If ``None``, all columns from the target table
object are rendered.
:param dialect: A ``Dialect`` instance from which a ``Compiled``
will be acquired. This argument takes precedence over the `bind`
argument as well as this :class:`.ClauseElement`'s bound engine,
if any.
:param inline: Used for INSERT statements, for a dialect which does
not support inline retrieval of newly generated primary key
columns, will force the expression used to create the new primary
key value to be rendered inline within the INSERT statement's
VALUES clause. This typically refers to Sequence execution but may
also refer to any server-side default generation function
associated with a primary key `Column`.
:param compile_kwargs: optional dictionary of additional parameters
that will be passed through to the compiler within all "visit"
methods. This allows any custom flag to be passed through to
a custom compilation construct, for example. It is also used
for the case of passing the ``literal_binds`` flag through::
from sqlalchemy.sql import table, column, select
t = table('t', column('x'))
s = select([t]).where(t.c.x == 5)
print s.compile(compile_kwargs={"literal_binds": True})
.. versionadded:: 0.9.0
.. seealso::
:ref:`faq_sql_expression_string`
"""
if not dialect:
if bind:
dialect = bind.dialect
elif self.bind:
dialect = self.bind.dialect
bind = self.bind
else:
dialect = default.DefaultDialect()
return self._compiler(dialect, bind=bind, **kw)
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.statement_compiler(dialect, self, **kw)
def __str__(self):
if util.py3k:
return str(self.compile())
else:
return unicode(self.compile()).encode('ascii', 'backslashreplace')
def __and__(self, other):
"""'and' at the ClauseElement level.
.. deprecated:: 0.9.5 - conjunctions are intended to be
at the :class:`.ColumnElement`. level
"""
return and_(self, other)
def __or__(self, other):
"""'or' at the ClauseElement level.
.. deprecated:: 0.9.5 - conjunctions are intended to be
at the :class:`.ColumnElement`. level
"""
return or_(self, other)
def __invert__(self):
if hasattr(self, 'negation_clause'):
return self.negation_clause
else:
return self._negate()
def _negate(self):
return UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
negate=None)
def __bool__(self):
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
def __repr__(self):
friendly = self.description
if friendly is None:
return object.__repr__(self)
else:
return '<%s.%s at 0x%x; %s>' % (
self.__module__, self.__class__.__name__, id(self), friendly)
class ColumnElement(operators.ColumnOperators, ClauseElement):
"""Represent a column-oriented SQL expression suitable for usage in the
"columns" clause, WHERE clause etc. of a statement.
While the most familiar kind of :class:`.ColumnElement` is the
:class:`.Column` object, :class:`.ColumnElement` serves as the basis
for any unit that may be present in a SQL expression, including
the expressions themselves, SQL functions, bound parameters,
literal expressions, keywords such as ``NULL``, etc.
:class:`.ColumnElement` is the ultimate base class for all such elements.
A wide variety of SQLAlchemy Core functions work at the SQL expression
level, and are intended to accept instances of :class:`.ColumnElement` as
arguments. These functions will typically document that they accept a
"SQL expression" as an argument. What this means in terms of SQLAlchemy
usually refers to an input which is either already in the form of a
:class:`.ColumnElement` object, or a value which can be **coerced** into
one. The coercion rules followed by most, but not all, SQLAlchemy Core
functions with regards to SQL expressions are as follows:
* a literal Python value, such as a string, integer or floating
point value, boolean, datetime, ``Decimal`` object, or virtually
any other Python object, will be coerced into a "literal bound
value". This generally means that a :func:`.bindparam` will be
produced featuring the given value embedded into the construct; the
resulting :class:`.BindParameter` object is an instance of
:class:`.ColumnElement`. The Python value will ultimately be sent
      to the DBAPI at execution time as a parameterized argument to the
``execute()`` or ``executemany()`` methods, after SQLAlchemy
type-specific converters (e.g. those provided by any associated
:class:`.TypeEngine` objects) are applied to the value.
* any special object value, typically ORM-level constructs, which
feature a method called ``__clause_element__()``. The Core
expression system looks for this method when an object of otherwise
unknown type is passed to a function that is looking to coerce the
argument into a :class:`.ColumnElement` expression. The
``__clause_element__()`` method, if present, should return a
:class:`.ColumnElement` instance. The primary use of
``__clause_element__()`` within SQLAlchemy is that of class-bound
attributes on ORM-mapped classes; a ``User`` class which contains a
mapped attribute named ``.name`` will have a method
``User.name.__clause_element__()`` which when invoked returns the
:class:`.Column` called ``name`` associated with the mapped table.
* The Python ``None`` value is typically interpreted as ``NULL``,
which in SQLAlchemy Core produces an instance of :func:`.null`.
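To illustrate the coercion rules above, a minimal sketch (the column
name ``x`` here is hypothetical)::
    >>> from sqlalchemy.sql import column
    >>> expr = column('x') == 5
    >>> type(expr.right).__name__
    'BindParameter'
    >>> print(column('x') == None)
    x IS NULL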
A :class:`.ColumnElement` provides the ability to generate new
:class:`.ColumnElement`
objects using Python expressions. This means that Python operators
such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
and allow the instantiation of further :class:`.ColumnElement` instances
which are composed from other, more fundamental :class:`.ColumnElement`
objects. For example, two :class:`.ColumnClause` objects can be added
together with the addition operator ``+`` to produce
a :class:`.BinaryExpression`.
Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
of :class:`.ColumnElement`::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print column('a') + column('b')
a + b
.. seealso::
:class:`.Column`
:func:`.expression.column`
"""
__visit_name__ = 'column'
primary_key = False
foreign_keys = []
_label = None
"""The named label that can be used to target
this column in a result set.
This label is almost always the label used when
rendering <expr> AS <label> in a SELECT statement. It also
refers to a name that this column expression can be located from
in a result set.
For a regular Column bound to a Table, this is typically the label
<tablename>_<columnname>. For other constructs, different rules
may apply, such as anonymized labels and others.
"""
key = None
"""the 'key' that in some circumstances refers to this object in a
Python namespace.
This typically refers to the "key" of the column as present in the
``.c`` collection of a selectable, e.g. sometable.c["somekey"] would
return a Column with a .key of "somekey".
"""
_key_label = None
"""A label-based version of 'key' that in some circumstances refers
to this object in a Python namespace.
_key_label comes into play when a select() statement is constructed with
apply_labels(); in this case, all Column objects in the ``.c`` collection
are rendered as <tablename>_<columnname> in SQL; this is essentially the
value of ._label. But to locate those columns in the ``.c`` collection,
the name is along the lines of <tablename>_<key>; that's the typical
value of ``._key_label``.
"""
_render_label_in_columns_clause = True
"""A flag used by select._columns_plus_names that helps to determine
whether we are actually going to render in terms of "SELECT <col> AS <label>".
This flag can be returned as False for some Column objects that want
to be rendered as simple "SELECT <col>"; typically columns that don't have
any parent table and are named the same as what the label would be
in any case.
"""
_resolve_label = None
"""The name that should be used to identify this ColumnElement in a
select() object when "label resolution" logic is used; this refers
to using a string name in an expression like order_by() or group_by()
that wishes to target a labeled expression in the columns clause.
The name is distinct from that of .name or ._label to account for the case
where anonymizing logic may be used to change the name that's actually
rendered at compile time; this attribute should hold onto the original
name that was user-assigned when producing a .label() construct.
"""
_allow_label_resolve = True
"""A flag that can be flipped to prevent a column from being resolvable
by string label name."""
_alt_names = ()
def self_group(self, against=None):
if (against in (operators.and_, operators.or_, operators._asbool) and
self.type._type_affinity
is type_api.BOOLEANTYPE._type_affinity):
return AsBoolean(self, operators.istrue, operators.isfalse)
else:
return self
def _negate(self):
if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return AsBoolean(self, operators.isfalse, operators.istrue)
else:
return super(ColumnElement, self)._negate()
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@util.memoized_property
def comparator(self):
return self.type.comparator_factory(self)
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError:
raise AttributeError(
'Neither %r object nor %r object has an attribute %r' % (
type(self).__name__,
type(self.comparator).__name__,
key)
)
def operate(self, op, *other, **kwargs):
return op(self.comparator, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
def _bind_param(self, operator, obj):
return BindParameter(None, obj,
_compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
@property
def expression(self):
"""Return a column expression.
Part of the inspection interface; returns self.
"""
return self
@property
def _select_iterable(self):
return (self, )
@util.memoized_property
def base_columns(self):
return util.column_set(c for c in self.proxy_set
if not hasattr(c, '_proxies'))
@util.memoized_property
def proxy_set(self):
s = util.column_set([self])
if hasattr(self, '_proxies'):
for c in self._proxies:
s.update(c.proxy_set)
return s
def shares_lineage(self, othercolumn):
"""Return True if the given :class:`.ColumnElement`
has a common ancestor to this :class:`.ColumnElement`."""
return bool(self.proxy_set.intersection(othercolumn.proxy_set))
def _compare_name_for_result(self, other):
"""Return True if the given column element compares to this one
when targeting within a result row."""
return hasattr(other, 'name') and hasattr(self, 'name') and \
other.name == self.name
def _make_proxy(
self, selectable, name=None, name_is_truncatable=False, **kw):
"""Create a new :class:`.ColumnElement` representing this
:class:`.ColumnElement` as it appears in the select list of a
descending selectable.
"""
if name is None:
name = self.anon_label
if self.key:
key = self.key
else:
try:
key = str(self)
except exc.UnsupportedCompilationError:
key = self.anon_label
else:
key = name
co = ColumnClause(
_as_truncated(name) if name_is_truncatable else name,
type_=getattr(self, 'type', None),
_selectable=selectable
)
co._proxies = [self]
if selectable._is_clone_of is not None:
co._is_clone_of = \
selectable._is_clone_of.columns.get(key)
selectable._columns[key] = co
return co
def compare(self, other, use_proxies=False, equivalents=None, **kw):
"""Compare this ColumnElement to another.
Special arguments understood:
:param use_proxies: when True, consider two columns that
share a common base column as equivalent (i.e. shares_lineage())
:param equivalents: a dictionary of columns as keys mapped to sets
of columns. If the given "other" column is present in this
dictionary, if any of the columns in the corresponding set() pass
the comparison test, the result is True. This is used to expand the
comparison to other columns that may be known to be equivalent to
this one via foreign key or other criterion.
"""
to_compare = (other, )
if equivalents and other in equivalents:
to_compare = equivalents[other].union(to_compare)
for oth in to_compare:
if use_proxies and self.shares_lineage(oth):
return True
elif hash(oth) == hash(self):
return True
else:
return False
def label(self, name):
"""Produce a column label, i.e. ``<columnname> AS <name>``.
This is a shortcut to the :func:`~.expression.label` function.
If ``name`` is None, an anonymous label name will be generated.
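A minimal sketch (column names here are illustrative)::
    from sqlalchemy.sql import column, select
    stmt = select([(column('a') + column('b')).label('total')])
    # renders: SELECT a + b AS total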
"""
return Label(name, self, self.type)
@util.memoized_property
def anon_label(self):
"""provides a constant 'anonymous label' for this ColumnElement.
This is a label() expression which will be named at compile time.
The same label() is returned each time anon_label is called so
that expressions can reference anon_label multiple times, producing
the same label name at compile time.
The compiler uses this property automatically at compile time
for expressions that are known to be 'unnamed' like binary
expressions and function calls.
"""
while self._is_clone_of is not None:
self = self._is_clone_of
return _anonymous_label(
'%%(%d %s)s' % (id(self), getattr(self, 'name', 'anon'))
)
class BindParameter(ColumnElement):
"""Represent a "bound expression".
:class:`.BindParameter` is invoked explicitly using the
:func:`.bindparam` function, as in::
from sqlalchemy import bindparam
stmt = select([users_table]).\\
where(users_table.c.name == bindparam('username'))
Detailed discussion of how :class:`.BindParameter` is used is
at :func:`.bindparam`.
.. seealso::
:func:`.bindparam`
"""
__visit_name__ = 'bindparam'
_is_crud = False
def __init__(self, key, value=NO_ARG, type_=None,
unique=False, required=NO_ARG,
quote=None, callable_=None,
isoutparam=False,
_compared_to_operator=None,
_compared_to_type=None):
"""Produce a "bound expression".
The return value is an instance of :class:`.BindParameter`; this
is a :class:`.ColumnElement` subclass which represents a so-called
"placeholder" value in a SQL expression, the value of which is
supplied at the point at which the statement is executed against a
database connection.
In SQLAlchemy, the :func:`.bindparam` construct has
the ability to carry along the actual value that will be ultimately
used at expression time. In this way, it serves not just as
a "placeholder" for eventual population, but also as a means of
representing so-called "unsafe" values which should not be rendered
directly in a SQL statement, but rather should be passed along
to the :term:`DBAPI` as values which need to be correctly escaped
and potentially handled for type-safety.
When using :func:`.bindparam` explicitly, the use case is typically
one of traditional deferment of parameters; the :func:`.bindparam`
construct accepts a name which can then be referred to at execution
time::
from sqlalchemy import bindparam
stmt = select([users_table]).\\
where(users_table.c.name == bindparam('username'))
The above statement, when rendered, will produce SQL similar to::
SELECT id, name FROM user WHERE name = :username
In order to populate the value of ``:username`` above, the value
would typically be applied at execution time to a method
like :meth:`.Connection.execute`::
result = connection.execute(stmt, username='wendy')
Explicit use of :func:`.bindparam` is also common when producing
UPDATE or DELETE statements that are to be invoked multiple times,
where the WHERE criterion of the statement is to change on each
invocation, such as::
stmt = (users_table.update().
where(user_table.c.name == bindparam('username')).
values(fullname=bindparam('fullname'))
)
connection.execute(
stmt, [{"username": "wendy", "fullname": "Wendy Smith"},
{"username": "jack", "fullname": "Jack Jones"},
]
)
SQLAlchemy's Core expression system makes wide use of
:func:`.bindparam` in an implicit sense. It is typical that Python
literal values passed to virtually all SQL expression functions are
coerced into fixed :func:`.bindparam` constructs. For example, given
a comparison operation such as::
expr = users_table.c.name == 'Wendy'
The above expression will produce a :class:`.BinaryExpression`
construct, where the left side is the :class:`.Column` object
representing the ``name`` column, and the right side is a
:class:`.BindParameter` representing the literal value::
print(repr(expr.right))
BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
The expression above will render SQL such as::
user.name = :name_1
Where the ``:name_1`` parameter name is an anonymous name. The
actual string ``Wendy`` is not in the rendered string, but is carried
along where it is later used within statement execution. If we
invoke a statement like the following::
stmt = select([users_table]).where(users_table.c.name == 'Wendy')
result = connection.execute(stmt)
We would see SQL logging output as::
SELECT "user".id, "user".name
FROM "user"
WHERE "user".name = %(name_1)s
{'name_1': 'Wendy'}
Above, we see that ``Wendy`` is passed as a parameter to the database,
while the placeholder ``:name_1`` is rendered in the appropriate form
for the target database, in this case the Postgresql database.
Similarly, :func:`.bindparam` is invoked automatically
when working with :term:`CRUD` statements as far as the "VALUES"
portion is concerned. The :func:`.insert` construct produces an
``INSERT`` expression which will, at statement execution time,
generate bound placeholders based on the arguments passed, as in::
stmt = users_table.insert()
result = connection.execute(stmt, name='Wendy')
The above will produce SQL output as::
INSERT INTO "user" (name) VALUES (%(name)s)
{'name': 'Wendy'}
The :class:`.Insert` construct, at compilation/execution time,
rendered a single :func:`.bindparam` mirroring the column
name ``name`` as a result of the single ``name`` parameter
we passed to the :meth:`.Connection.execute` method.
:param key:
the key (e.g. the name) for this bind param.
Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`BindParameter` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. Will be used at statement
execution time as the value for this parameter passed to the
DBAPI, if no other value is indicated to the statement execution
method for this particular parameter name. Defaults to ``None``.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
:param type\_:
A :class:`.TypeEngine` class or instance representing an optional
datatype for this :func:`.bindparam`. If not passed, a type
may be determined automatically for the bind, based on the given
value; for example, trivial Python types such as ``str``,
``int``, ``bool``
may result in the :class:`.String`, :class:`.Integer` or
:class:`.Boolean` types being automatically selected.
The type of a :func:`.bindparam` is significant especially in that
the type will apply pre-processing to the value before it is
passed to the database. For example, a :func:`.bindparam` which
refers to a datetime value, and is specified as holding the
:class:`.DateTime` type, may apply conversion needed to the
value (such as stringification on SQLite) before passing the value
to the database.
:param unique:
if True, the key name of this :class:`.BindParameter` will be
modified if another :class:`.BindParameter` of the same name
already has been located within the containing
expression. This flag is used generally by the internals
when producing so-called "anonymous" bound expressions; it
isn't generally applicable to explicitly-named :func:`.bindparam`
constructs.
:param required:
If ``True``, a value is required at execution time. If not passed,
it defaults to ``True`` if neither :paramref:`.bindparam.value`
nor :paramref:`.bindparam.callable` was passed. If either of these
parameters are present, then :paramref:`.bindparam.required`
defaults to ``False``.
.. versionchanged:: 0.8 If the ``required`` flag is not specified,
it will be set automatically to ``True`` or ``False`` depending
on whether or not the ``value`` or ``callable`` parameters
were specified.
:param quote:
True if this parameter name requires quoting and is not
currently known as a SQLAlchemy reserved word; this currently
only applies to the Oracle backend, where bound names must
sometimes be quoted.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter. This applies to backends such as Oracle which
support OUT parameters.
.. seealso::
:ref:`coretutorial_bind_param`
:ref:`coretutorial_insert_expressions`
:func:`.outparam`
"""
if isinstance(key, ColumnClause):
type_ = key.type
key = key.key
if required is NO_ARG:
required = (value is NO_ARG and callable_ is None)
if value is NO_ARG:
value = None
if quote is not None:
key = quoted_name(key, quote)
if unique:
self.key = _anonymous_label('%%(%d %s)s' % (id(self), key
or 'param'))
else:
self.key = key or _anonymous_label('%%(%d param)s'
% id(self))
# identifying key that won't change across
# clones, used to identify the bind's logical
# identity
self._identifying_key = self.key
# key that was passed in the first place, used to
# generate new keys
self._orig_key = key or 'param'
self.unique = unique
self.value = value
self.callable = callable_
self.isoutparam = isoutparam
self.required = required
if type_ is None:
if _compared_to_type is not None:
self.type = \
_compared_to_type.coerce_compared_value(
_compared_to_operator, value)
else:
self.type = type_api._type_map.get(type(value),
type_api.NULLTYPE)
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
def _with_value(self, value):
"""Return a copy of this :class:`.BindParameter` with the given value
set.
"""
cloned = self._clone()
cloned.value = value
cloned.callable = None
cloned.required = False
if cloned.type is type_api.NULLTYPE:
cloned.type = type_api._type_map.get(type(value),
type_api.NULLTYPE)
return cloned
@property
def effective_value(self):
"""Return the value of this bound parameter,
taking into account if the ``callable`` parameter
was set.
The ``callable`` value will be evaluated
and returned if present, else ``value``.
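A minimal sketch::
    from sqlalchemy import bindparam
    p = bindparam('x', callable_=lambda: 42)
    assert p.effective_value == 42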
"""
if self.callable:
return self.callable()
else:
return self.value
def _clone(self):
c = ClauseElement._clone(self)
if self.unique:
c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key
or 'param'))
return c
def _convert_to_unique(self):
if not self.unique:
self.unique = True
self.key = _anonymous_label(
'%%(%d %s)s' % (id(self), self._orig_key or 'param'))
def compare(self, other, **kw):
"""Compare this :class:`BindParameter` to the given
clause."""
return isinstance(other, BindParameter) \
and self.type._compare_type_affinity(other.type) \
and self.value == other.value
def __getstate__(self):
"""execute a deferred value for serialization purposes."""
d = self.__dict__.copy()
v = self.value
if self.callable:
v = self.callable()
d['callable'] = None
d['value'] = v
return d
def __repr__(self):
return 'BindParameter(%r, %r, type_=%r)' % (self.key,
self.value, self.type)
class TypeClause(ClauseElement):
"""Handle a type keyword in a SQL statement.
Used by the ``Case`` statement.
"""
__visit_name__ = 'typeclause'
def __init__(self, type):
self.type = type
class TextClause(Executable, ClauseElement):
"""Represent a literal SQL text fragment.
E.g.::
from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The :class:`.Text` construct is produced using the :func:`.text`
function; see that function for full documentation.
.. seealso::
:func:`.text`
"""
__visit_name__ = 'textclause'
_bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
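    # matches e.g. ":user_id" in "WHERE id=:user_id"; skips "::"-style
    # casts and backslash-escaped "\:" sequences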
_execution_options = \
Executable._execution_options.union(
{'autocommit': PARSE_AUTOCOMMIT})
@property
def _select_iterable(self):
return (self,)
@property
def selectable(self):
return self
_hide_froms = []
# help in those cases where text() is
# interpreted in a column expression situation
key = _label = _resolve_label = None
_allow_label_resolve = False
def __init__(
self,
text,
bind=None):
self._bind = bind
self._bindparams = {}
def repl(m):
self._bindparams[m.group(1)] = BindParameter(m.group(1))
return ':%s' % m.group(1)
# scan the string and search for bind parameter names, add them
# to the list of bindparams
self.text = self._bind_params_regex.sub(repl, text)
@classmethod
def _create_text(cls, text, bind=None, bindparams=None,
typemap=None, autocommit=None):
"""Construct a new :class:`.TextClause` clause, representing
a textual SQL string directly.
E.g.::
from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The advantages :func:`.text` provides over a plain string are
backend-neutral support for bind parameters, per-statement
execution options, as well as
bind parameter and result-column typing behavior, allowing
SQLAlchemy type constructs to play a role when executing
a statement that is specified literally. The construct can also
be provided with a ``.c`` collection of column elements, allowing
it to be embedded in other SQL expression constructs as a subquery.
Bind parameters are specified by name, using the format ``:name``.
E.g.::
t = text("SELECT * FROM users WHERE id=:user_id")
result = connection.execute(t, user_id=12)
For SQL statements where a colon is required verbatim, as within
an inline string, use a backslash to escape::
t = text("SELECT * FROM users WHERE name='\\:username'")
The :class:`.TextClause` construct includes methods which can
provide information about the bound parameters as well as the column
values which would be returned from the textual statement, assuming
it's an executable SELECT type of statement. The
:meth:`.TextClause.bindparams` method is used to provide bound
parameter detail, and :meth:`.TextClause.columns` method allows
specification of return columns including names and types::
t = text("SELECT * FROM users WHERE id=:user_id").\\
bindparams(user_id=7).\\
columns(id=Integer, name=String)
for id, name in connection.execute(t):
print(id, name)
The :func:`.text` construct is used internally in cases when
a literal string is specified for part of a larger query, such as
when a string is specified to the :meth:`.Select.where` method of
:class:`.Select`. In those cases, the same
bind parameter syntax is applied::
s = select([users.c.id, users.c.name]).where("id=:user_id")
result = connection.execute(s, user_id=12)
Using :func:`.text` explicitly usually implies the construction
of a full, standalone statement. As such, SQLAlchemy refers
to it as an :class:`.Executable` object, and it supports
the :meth:`Executable.execution_options` method. For example,
a :func:`.text` construct that should be subject to "autocommit"
can be set explicitly so using the
:paramref:`.Connection.execution_options.autocommit` option::
t = text("EXEC my_procedural_thing()").\\
execution_options(autocommit=True)
Note that SQLAlchemy's usual "autocommit" behavior applies to
:func:`.text` constructs implicitly - that is, statements which begin
with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
or a variety of other phrases specific to certain backends, will
be eligible for autocommit if no transaction is in progress.
:param text:
the text of the SQL statement to be created. Use ``:<param>``
to specify bind parameters; they will be compiled to their
engine-specific format.
:param autocommit:
Deprecated. Use .execution_options(autocommit=<True|False>)
to set the autocommit option.
:param bind:
an optional connection or engine to be used for this text query.
:param bindparams:
Deprecated. A list of :func:`.bindparam` instances used to
provide information about parameters embedded in the statement.
This argument now invokes the :meth:`.TextClause.bindparams`
method on the construct before returning it. E.g.::
stmt = text("SELECT * FROM table WHERE id=:id",
bindparams=[bindparam('id', value=5, type_=Integer)])
Is equivalent to::
stmt = text("SELECT * FROM table WHERE id=:id").\\
bindparams(bindparam('id', value=5, type_=Integer))
.. deprecated:: 0.9.0 the :meth:`.TextClause.bindparams` method
supersedes the ``bindparams`` argument to :func:`.text`.
:param typemap:
Deprecated. A dictionary mapping the names of columns
represented in the columns clause of a ``SELECT`` statement
to type objects,
which will be used to perform post-processing on columns within
the result set. This parameter now invokes the
:meth:`.TextClause.columns` method, which returns a
:class:`.TextAsFrom` construct that gains a ``.c`` collection and
can be embedded in other expressions. E.g.::
stmt = text("SELECT * FROM table",
typemap={'id': Integer, 'name': String},
)
Is equivalent to::
stmt = text("SELECT * FROM table").columns(id=Integer,
name=String)
Or alternatively::
from sqlalchemy.sql import column
stmt = text("SELECT * FROM table").columns(
column('id', Integer),
column('name', String)
)
.. deprecated:: 0.9.0 the :meth:`.TextClause.columns` method
supersedes the ``typemap`` argument to :func:`.text`.
"""
stmt = TextClause(text, bind=bind)
if bindparams:
stmt = stmt.bindparams(*bindparams)
if typemap:
stmt = stmt.columns(**typemap)
if autocommit is not None:
util.warn_deprecated('autocommit on text() is deprecated. '
'Use .execution_options(autocommit=True)')
stmt = stmt.execution_options(autocommit=autocommit)
return stmt
@_generative
def bindparams(self, *binds, **names_to_values):
"""Establish the values and/or types of bound parameters within
this :class:`.TextClause` construct.
Given a text construct such as::
from sqlalchemy import text
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
the :meth:`.TextClause.bindparams` method can be used to establish
the initial value of ``:name`` and ``:timestamp``,
using simple keyword arguments::
stmt = stmt.bindparams(name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
Where above, new :class:`.BindParameter` objects
will be generated with the names ``name`` and ``timestamp``, and
values of ``jack`` and ``datetime.datetime(2012, 10, 8, 15, 12, 5)``,
respectively. The types will be
inferred from the values given, in this case :class:`.String` and
:class:`.DateTime`.
When specific typing behavior is needed, the positional ``*binds``
argument can be used in which to specify :func:`.bindparam` constructs
directly. These constructs must include at least the ``key``
argument, then an optional value and type::
from sqlalchemy import bindparam
stmt = stmt.bindparams(
bindparam('name', value='jack', type_=String),
bindparam('timestamp', type_=DateTime)
)
Above, we specified the type of :class:`.DateTime` for the
``timestamp`` bind, and the type of :class:`.String` for the ``name``
bind. In the case of ``name`` we also set the default value of
``"jack"``.
Additional bound parameters can be supplied at statement execution
time, e.g.::
result = connection.execute(stmt,
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
The :meth:`.TextClause.bindparams` method can be called repeatedly,
where it will re-use existing :class:`.BindParameter` objects to add
new information. For example, we can call
:meth:`.TextClause.bindparams` first with typing information, and a
second time with value information, and it will be combined::
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
stmt = stmt.bindparams(
bindparam('name', type_=String),
bindparam('timestamp', type_=DateTime)
)
stmt = stmt.bindparams(
name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
)
.. versionadded:: 0.9.0 The :meth:`.TextClause.bindparams` method
supersedes the argument ``bindparams`` passed to
:func:`~.expression.text`.
"""
self._bindparams = new_params = self._bindparams.copy()
for bind in binds:
try:
existing = new_params[bind.key]
except KeyError:
raise exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % bind.key)
else:
new_params[existing.key] = bind
for key, value in names_to_values.items():
try:
existing = new_params[key]
except KeyError:
raise exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % key)
else:
new_params[key] = existing._with_value(value)
@util.dependencies('sqlalchemy.sql.selectable')
def columns(self, selectable, *cols, **types):
"""Turn this :class:`.TextClause` object into a :class:`.TextAsFrom`
object that can be embedded into another statement.
This function essentially bridges the gap between an entirely
textual SELECT statement and the SQL expression language concept
of a "selectable"::
from sqlalchemy.sql import column, text
stmt = text("SELECT id, name FROM some_table")
stmt = stmt.columns(column('id'), column('name')).alias('st')
stmt = select([mytable]).\\
select_from(
mytable.join(stmt, mytable.c.name == stmt.c.name)
).where(stmt.c.id > 5)
Above, we used untyped :func:`.column` elements. These can also have
types specified, which will impact how the column behaves in
expressions as well as determining result set behavior::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
column('id', Integer),
column('name', Unicode),
column('timestamp', DateTime)
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
Keyword arguments allow just the names and types of columns to be
specified, where the :func:`.column` elements will be generated
automatically::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
id=Integer,
name=Unicode,
timestamp=DateTime
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
The :meth:`.TextClause.columns` method provides a direct
route to calling :meth:`.FromClause.alias` as well as
:meth:`.SelectBase.cte` against a textual SELECT statement::
stmt = stmt.columns(id=Integer, name=String).cte('st')
stmt = select([sometable]).where(sometable.c.id == stmt.c.id)
.. versionadded:: 0.9.0 :func:`.text` can now be converted into a
fully featured "selectable" construct using the
:meth:`.TextClause.columns` method. This method supersedes the
``typemap`` argument to :func:`.text`.
"""
input_cols = [
ColumnClause(col.key, types.pop(col.key))
if col.key in types
else col
for col in cols
] + [ColumnClause(key, type_) for key, type_ in types.items()]
return selectable.TextAsFrom(self, input_cols)
@property
def type(self):
return type_api.NULLTYPE
@property
def comparator(self):
return self.type.comparator_factory(self)
def self_group(self, against=None):
if against is operators.in_op:
return Grouping(self)
else:
return self
def _copy_internals(self, clone=_clone, **kw):
self._bindparams = dict((b.key, clone(b, **kw))
for b in self._bindparams.values())
def get_children(self, **kwargs):
return list(self._bindparams.values())
def compare(self, other):
return isinstance(other, TextClause) and other.text == self.text
class Null(ColumnElement):
"""Represent the NULL keyword in a SQL statement.
:class:`.Null` is accessed as a constant via the
:func:`.null` function.
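A minimal sketch (hypothetical column ``x``)::
    from sqlalchemy import null
    from sqlalchemy.sql import column
    print(column('x') == null())
    # renders: x IS NULL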
"""
__visit_name__ = 'null'
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@classmethod
def _instance(cls):
"""Return a constant :class:`.Null` construct."""
return Null()
def compare(self, other):
return isinstance(other, Null)
class False_(ColumnElement):
"""Represent the ``false`` keyword, or equivalent, in a SQL statement.
:class:`.False_` is accessed as a constant via the
:func:`.false` function.
"""
__visit_name__ = 'false'
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return True_()
@classmethod
def _instance(cls):
"""Return a :class:`.False_` construct.
E.g.::
>>> from sqlalchemy import false
>>> print select([t.c.x]).where(false())
SELECT x FROM t WHERE false
A backend which does not support true/false constants will render as
an expression against 1 or 0::
>>> print select([t.c.x]).where(false())
SELECT x FROM t WHERE 0 = 1
The :func:`.true` and :func:`.false` constants also feature
"short circuit" operation within an :func:`.and_` or :func:`.or_`
conjunction::
>>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
SELECT x FROM t WHERE true
>>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
SELECT x FROM t WHERE false
.. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
better integrated behavior within conjunctions and on dialects
that don't support true/false constants.
.. seealso::
:func:`.true`
"""
return False_()
def compare(self, other):
return isinstance(other, False_)
class True_(ColumnElement):
"""Represent the ``true`` keyword, or equivalent, in a SQL statement.
:class:`.True_` is accessed as a constant via the
:func:`.true` function.
"""
__visit_name__ = 'true'
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return False_()
@classmethod
def _ifnone(cls, other):
if other is None:
return cls._instance()
else:
return other
@classmethod
def _instance(cls):
"""Return a constant :class:`.True_` construct.
E.g.::
>>> from sqlalchemy import true
>>> print select([t.c.x]).where(true())
SELECT x FROM t WHERE true
A backend which does not support true/false constants will render as
an expression against 1 or 0::
>>> print select([t.c.x]).where(true())
SELECT x FROM t WHERE 1 = 1
The :func:`.true` and :func:`.false` constants also feature
"short circuit" operation within an :func:`.and_` or :func:`.or_`
conjunction::
>>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
SELECT x FROM t WHERE true
>>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
SELECT x FROM t WHERE false
.. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
better integrated behavior within conjunctions and on dialects
that don't support true/false constants.
.. seealso::
:func:`.false`
"""
return True_()
def compare(self, other):
return isinstance(other, True_)
class ClauseList(ClauseElement):
"""Describe a list of clauses, separated by an operator.
By default, it is comma-separated, such as in a column listing.
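A minimal sketch (this construct is mostly used internally; names here
are illustrative)::
    from sqlalchemy.sql.elements import ClauseList
    from sqlalchemy.sql import column
    print(ClauseList(column('a'), column('b'), column('c')))
    # renders: a, b, c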
"""
__visit_name__ = 'clauselist'
def __init__(self, *clauses, **kwargs):
self.operator = kwargs.pop('operator', operators.comma_op)
self.group = kwargs.pop('group', True)
self.group_contents = kwargs.pop('group_contents', True)
text_converter = kwargs.pop(
'_literal_as_text',
_expression_literal_as_text)
if self.group_contents:
self.clauses = [
text_converter(clause).self_group(against=self.operator)
for clause in clauses]
else:
self.clauses = [
text_converter(clause)
for clause in clauses]
def __iter__(self):
return iter(self.clauses)
def __len__(self):
return len(self.clauses)
@property
def _select_iterable(self):
return iter(self)
def append(self, clause):
if self.group_contents:
self.clauses.append(_literal_as_text(clause).
self_group(against=self.operator))
else:
self.clauses.append(_literal_as_text(clause))
def _copy_internals(self, clone=_clone, **kw):
self.clauses = [clone(clause, **kw) for clause in self.clauses]
def get_children(self, **kwargs):
return self.clauses
@property
def _from_objects(self):
return list(itertools.chain(*[c._from_objects for c in self.clauses]))
def self_group(self, against=None):
if self.group and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def compare(self, other, **kw):
"""Compare this :class:`.ClauseList` to the given :class:`.ClauseList`,
including a comparison of all the clause items.
"""
if not isinstance(other, ClauseList) and len(self.clauses) == 1:
return self.clauses[0].compare(other, **kw)
elif isinstance(other, ClauseList) and \
len(self.clauses) == len(other.clauses):
for i in range(0, len(self.clauses)):
if not self.clauses[i].compare(other.clauses[i], **kw):
return False
else:
return self.operator == other.operator
else:
return False
class BooleanClauseList(ClauseList, ColumnElement):
__visit_name__ = 'clauselist'
def __init__(self, *arg, **kw):
raise NotImplementedError(
"BooleanClauseList has a private constructor")
@classmethod
def _construct(cls, operator, continue_on, skip_on, *clauses, **kw):
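        # "continue_on" elements are neutral and dropped from the list
        # (e.g. true() inside and_()); "skip_on" elements short-circuit
        # the whole conjunction (e.g. false() inside and_())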
convert_clauses = []
clauses = util.coerce_generator_arg(clauses)
for clause in clauses:
clause = _expression_literal_as_text(clause)
if isinstance(clause, continue_on):
continue
elif isinstance(clause, skip_on):
return clause.self_group(against=operators._asbool)
convert_clauses.append(clause)
if len(convert_clauses) == 1:
return convert_clauses[0].self_group(against=operators._asbool)
elif not convert_clauses and clauses:
return clauses[0].self_group(against=operators._asbool)
convert_clauses = [c.self_group(against=operator)
for c in convert_clauses]
self = cls.__new__(cls)
self.clauses = convert_clauses
self.group = True
self.operator = operator
self.group_contents = True
self.type = type_api.BOOLEANTYPE
return self
@classmethod
def and_(cls, *clauses):
"""Produce a conjunction of expressions joined by ``AND``.
E.g.::
from sqlalchemy import and_
stmt = select([users_table]).where(
and_(
users_table.c.name == 'wendy',
users_table.c.enrolled == True
)
)
The :func:`.and_` conjunction is also available using the
Python ``&`` operator (though note that compound expressions
need to be parenthesized in order to function with Python
operator precedence behavior)::
stmt = select([users_table]).where(
(users_table.c.name == 'wendy') &
(users_table.c.enrolled == True)
)
The :func:`.and_` operation is also implicit in some cases;
the :meth:`.Select.where` method for example can be invoked multiple
times against a statement, which will have the effect of each
clause being combined using :func:`.and_`::
stmt = select([users_table]).\\
where(users_table.c.name == 'wendy').\\
where(users_table.c.enrolled == True)
.. seealso::
:func:`.or_`
"""
return cls._construct(operators.and_, True_, False_, *clauses)
@classmethod
def or_(cls, *clauses):
"""Produce a conjunction of expressions joined by ``OR``.
E.g.::
from sqlalchemy import or_
stmt = select([users_table]).where(
or_(
users_table.c.name == 'wendy',
users_table.c.name == 'jack'
)
)
The :func:`.or_` conjunction is also available using the
Python ``|`` operator (though note that compound expressions
need to be parenthesized in order to function with Python
operator precedence behavior)::
stmt = select([users_table]).where(
(users_table.c.name == 'wendy') |
(users_table.c.name == 'jack')
)
.. seealso::
:func:`.and_`
"""
return cls._construct(operators.or_, False_, True_, *clauses)
@property
def _select_iterable(self):
return (self, )
def self_group(self, against=None):
if not self.clauses:
return self
else:
return super(BooleanClauseList, self).self_group(against=against)
def _negate(self):
return ClauseList._negate(self)
and_ = BooleanClauseList.and_
or_ = BooleanClauseList.or_
class Tuple(ClauseList, ColumnElement):
"""Represent a SQL tuple."""
def __init__(self, *clauses, **kw):
"""Return a :class:`.Tuple`.
Main usage is to produce a composite IN construct::
from sqlalchemy import tuple_
tuple_(table.c.col1, table.c.col2).in_(
[(1, 2), (5, 12), (10, 19)]
)
.. warning::
The composite IN construct is not supported by all backends,
and is currently known to work on Postgresql and MySQL,
but not SQLite. Unsupported backends will raise
a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such
an expression is invoked.
"""
clauses = [_literal_as_binds(c) for c in clauses]
self._type_tuple = [arg.type for arg in clauses]
self.type = kw.pop('type_', self._type_tuple[0]
if self._type_tuple else type_api.NULLTYPE)
super(Tuple, self).__init__(*clauses, **kw)
@property
def _select_iterable(self):
return (self, )
def _bind_param(self, operator, obj):
return Tuple(*[
BindParameter(None, o, _compared_to_operator=operator,
_compared_to_type=type_, unique=True)
for o, type_ in zip(obj, self._type_tuple)
]).self_group()
class Case(ColumnElement):
"""Represent a ``CASE`` expression.
:class:`.Case` is produced using the :func:`.case` factory function,
as in::
from sqlalchemy import case
stmt = select([users_table]).\\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
Details on :class:`.Case` usage are at :func:`.case`.
.. seealso::
:func:`.case`
"""
__visit_name__ = 'case'
def __init__(self, whens, value=None, else_=None):
"""Produce a ``CASE`` expression.
The ``CASE`` construct in SQL is a conditional object that
acts somewhat analogously to an "if/then" construct in other
languages. It returns an instance of :class:`.Case`.
:func:`.case` in its usual form is passed a list of "when"
constructs, that is, a list of conditions and results as tuples::
from sqlalchemy import case
stmt = select([users_table]).\\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
The above statement will produce SQL resembling::
SELECT id, name FROM user
WHERE CASE
WHEN (name = :name_1) THEN :param_1
WHEN (name = :name_2) THEN :param_2
ELSE :param_3
END
When simple equality expressions of several values against a single
parent column are needed, :func:`.case` also has a "shorthand" format
used via the
:paramref:`.case.value` parameter, which is passed a column
expression to be compared. In this form, the :paramref:`.case.whens`
parameter is passed as a dictionary containing expressions to be
compared against keyed to result expressions. The statement below is
equivalent to the preceding statement::
stmt = select([users_table]).\\
where(
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name,
else_='E'
)
)
The values which are accepted as result values in
:paramref:`.case.whens` as well as with :paramref:`.case.else_` are
coerced from Python literals into :func:`.bindparam` constructs.
SQL expressions, e.g. :class:`.ColumnElement` constructs, are accepted
as well. To coerce a literal string expression into a constant
expression rendered inline, use the :func:`.literal_column` construct,
as in::
from sqlalchemy import case, literal_column
case(
[
(
orderline.c.qty > 100,
literal_column("'greaterthan100'")
),
(
orderline.c.qty > 10,
literal_column("'greaterthan10'")
)
],
else_=literal_column("'lessthan10'")
)
The above will render the given constants without using bound
parameters for the result values (but still for the comparison
values), as in::
CASE
WHEN (orderline.qty > :qty_1) THEN 'greaterthan100'
WHEN (orderline.qty > :qty_2) THEN 'greaterthan10'
ELSE 'lessthan10'
END
:param whens: The criteria to be compared against,
:paramref:`.case.whens` accepts two different forms, based on
whether or not :paramref:`.case.value` is used.
In the first form, it accepts a list of 2-tuples; each 2-tuple
consists of ``(<sql expression>, <value>)``, where the SQL
expression is a boolean expression and "value" is a resulting value,
e.g.::
case([
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
])
In the second form, it accepts a Python dictionary of comparison
values mapped to a resulting value; this form requires
:paramref:`.case.value` to be present, and values will be compared
using the ``==`` operator, e.g.::
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name
)
:param value: An optional SQL expression which will be used as a
fixed "comparison point" for candidate values within a dictionary
passed to :paramref:`.case.whens`.
:param else\_: An optional SQL expression which will be the evaluated
result of the ``CASE`` construct if all expressions within
:paramref:`.case.whens` evaluate to false. When omitted, most
databases will produce a result of NULL if none of the "when"
expressions evaluate to true.
"""
try:
whens = util.dictlike_iteritems(whens)
except TypeError:
pass
if value is not None:
whenlist = [
(_literal_as_binds(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
else:
whenlist = [
(_no_literals(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
if whenlist:
type_ = list(whenlist[-1])[-1].type
else:
type_ = None
if value is None:
self.value = None
else:
self.value = _literal_as_binds(value)
self.type = type_
self.whens = whenlist
if else_ is not None:
self.else_ = _literal_as_binds(else_)
else:
self.else_ = None
def _copy_internals(self, clone=_clone, **kw):
if self.value is not None:
self.value = clone(self.value, **kw)
self.whens = [(clone(x, **kw), clone(y, **kw))
for x, y in self.whens]
if self.else_ is not None:
self.else_ = clone(self.else_, **kw)
def get_children(self, **kwargs):
if self.value is not None:
yield self.value
for x, y in self.whens:
yield x
yield y
if self.else_ is not None:
yield self.else_
@property
def _from_objects(self):
return list(itertools.chain(*[x._from_objects for x in
self.get_children()]))
def literal_column(text, type_=None):
"""Produce a :class:`.ColumnClause` object that has the
:paramref:`.column.is_literal` flag set to True.
:func:`.literal_column` is similar to :func:`.column`, except that
it is more often used as a "standalone" column expression that renders
exactly as stated; while :func:`.column` stores a string name that
will be assumed to be part of a table and may be quoted as such,
:func:`.literal_column` can be that, or any other arbitrary column-oriented
expression.
:param text: the text of the expression; can be any SQL expression.
Quoting rules will not be applied. To specify a column-name expression
which should be subject to quoting rules, use the :func:`column`
function.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
object which will
provide result-set translation and additional expression semantics for
this column. If left as None the type will be NullType.
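A minimal sketch contrasting the two constructs (hypothetical names)::
    from sqlalchemy import select, literal_column
    from sqlalchemy.sql import column
    print(select([literal_column("x + 1"), column('name')]))
    # renders: SELECT x + 1, name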
.. seealso::
:func:`.column`
:func:`.text`
:ref:`sqlexpression_literal_column`
"""
return ColumnClause(text, type_=type_, is_literal=True)
class Cast(ColumnElement):
"""Represent a ``CAST`` expression.
:class:`.Cast` is produced using the :func:`.cast` factory function,
as in::
from sqlalchemy import cast, Numeric
stmt = select([
cast(product_table.c.unit_price, Numeric(10, 4))
])
Details on :class:`.Cast` usage are at :func:`.cast`.
.. seealso::
:func:`.cast`
"""
__visit_name__ = 'cast'
def __init__(self, expression, type_):
"""Produce a ``CAST`` expression.
:func:`.cast` returns an instance of :class:`.Cast`.
E.g.::
from sqlalchemy import cast, Numeric
stmt = select([
cast(product_table.c.unit_price, Numeric(10, 4))
])
The above statement will produce SQL resembling::
SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product
The :func:`.cast` function performs two distinct functions when
used. The first is that it renders the ``CAST`` expression within
the resulting SQL string. The second is that it associates the given
type (e.g. :class:`.TypeEngine` class or instance) with the column
expression on the Python side, which means the expression will take
on the expression operator behavior associated with that type,
as well as the bound-value handling and result-row-handling behavior
of the type.
.. versionchanged:: 0.9.0 :func:`.cast` now applies the given type
to the expression such that it takes effect on the bound-value,
e.g. the Python-to-database direction, in addition to the
result handling, e.g. database-to-Python, direction.
An alternative to :func:`.cast` is the :func:`.type_coerce` function.
This function performs the second task of associating an expression
with a specific type, but does not render the ``CAST`` expression
in SQL.
:param expression: A SQL expression, such as a :class:`.ColumnElement`
expression or a Python string which will be coerced into a bound
literal value.
:param type_: A :class:`.TypeEngine` class or instance indicating
the type to which the ``CAST`` should apply.
.. seealso::
:func:`.type_coerce` - Python-side type coercion without emitting
CAST.
"""
self.type = type_api.to_instance(type_)
self.clause = _literal_as_binds(expression, type_=self.type)
self.typeclause = TypeClause(self.type)
def _copy_internals(self, clone=_clone, **kw):
self.clause = clone(self.clause, **kw)
self.typeclause = clone(self.typeclause, **kw)
def get_children(self, **kwargs):
return self.clause, self.typeclause
@property
def _from_objects(self):
return self.clause._from_objects
class Extract(ColumnElement):
"""Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""
__visit_name__ = 'extract'
def __init__(self, field, expr, **kwargs):
"""Return a :class:`.Extract` construct.
This is typically available as :func:`.extract`
as well as ``func.extract`` from the
:data:`.func` namespace.
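A minimal sketch (hypothetical column ``created_at``)::
    from sqlalchemy import extract
    from sqlalchemy.sql import column
    print(extract('year', column('created_at')))
    # renders: EXTRACT(year FROM created_at)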
"""
self.type = type_api.INTEGERTYPE
self.field = field
self.expr = _literal_as_binds(expr, None)
def _copy_internals(self, clone=_clone, **kw):
self.expr = clone(self.expr, **kw)
def get_children(self, **kwargs):
return self.expr,
@property
def _from_objects(self):
return self.expr._from_objects
class _label_reference(ColumnElement):
"""Wrap a column expression as it appears in a 'reference' context.
This expression is any that includes an _order_by_label_element,
which is a Label, or a DESC / ASC construct wrapping a Label.
The production of _label_reference() should occur when an expression
is added to this context; this includes the ORDER BY or GROUP BY of a
SELECT statement, as well as a few other places, such as the ORDER BY
within an OVER clause.
"""
__visit_name__ = 'label_reference'
def __init__(self, element):
self.element = element
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
@property
def _from_objects(self):
return ()
class _textual_label_reference(ColumnElement):
__visit_name__ = 'textual_label_reference'
def __init__(self, element):
self.element = element
@util.memoized_property
def _text_clause(self):
return TextClause._create_text(self.element)
class UnaryExpression(ColumnElement):
"""Define a 'unary' expression.
A unary expression has a single column expression
and an operator. The operator can be placed on the left
(where it is called the 'operator') or right (where it is called the
'modifier') of the column expression.
:class:`.UnaryExpression` is the basis for several unary operators
including those used by :func:`.desc`, :func:`.asc`, :func:`.distinct`,
:func:`.nullsfirst` and :func:`.nullslast`.
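A minimal sketch of the operator/modifier distinction (hypothetical
column ``x``)::
    from sqlalchemy import desc, distinct
    from sqlalchemy.sql import column
    print(desc(column('x')))      # modifier: renders "x DESC"
    print(distinct(column('x')))  # operator: renders "DISTINCT x"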
"""
__visit_name__ = 'unary'
def __init__(self, element, operator=None, modifier=None,
type_=None, negate=None, wraps_column_expression=False):
self.operator = operator
self.modifier = modifier
self.element = element.self_group(
against=self.operator or self.modifier)
self.type = type_api.to_instance(type_)
self.negate = negate
self.wraps_column_expression = wraps_column_expression
@classmethod
def _create_nullsfirst(cls, column):
"""Produce the ``NULLS FIRST`` modifier for an ``ORDER BY`` expression.
:func:`.nullsfirst` is intended to modify the expression produced
by :func:`.asc` or :func:`.desc`, and indicates how NULL values
should be handled when they are encountered during ordering::
from sqlalchemy import desc, nullsfirst
stmt = select([users_table]).\\
order_by(nullsfirst(desc(users_table.c.name)))
The SQL expression from the above would resemble::
SELECT id, name FROM user ORDER BY name DESC NULLS FIRST
Like :func:`.asc` and :func:`.desc`, :func:`.nullsfirst` is typically
invoked from the column expression itself using
:meth:`.ColumnElement.nullsfirst`, rather than as its standalone
function version, as in::
stmt = (select([users_table]).
order_by(users_table.c.name.desc().nullsfirst())
)
.. seealso::
:func:`.asc`
:func:`.desc`
:func:`.nullslast`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.nullsfirst_op,
wraps_column_expression=False)
@classmethod
def _create_nullslast(cls, column):
"""Produce the ``NULLS LAST`` modifier for an ``ORDER BY`` expression.
:func:`.nullslast` is intended to modify the expression produced
by :func:`.asc` or :func:`.desc`, and indicates how NULL values
should be handled when they are encountered during ordering::
from sqlalchemy import desc, nullslast
stmt = select([users_table]).\\
order_by(nullslast(desc(users_table.c.name)))
The SQL expression from the above would resemble::
SELECT id, name FROM user ORDER BY name DESC NULLS LAST
Like :func:`.asc` and :func:`.desc`, :func:`.nullslast` is typically
invoked from the column expression itself using
:meth:`.ColumnElement.nullslast`, rather than as its standalone
function version, as in::
stmt = select([users_table]).\\
order_by(users_table.c.name.desc().nullslast())
.. seealso::
:func:`.asc`
:func:`.desc`
:func:`.nullsfirst`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.nullslast_op,
wraps_column_expression=False)
@classmethod
def _create_desc(cls, column):
"""Produce a descending ``ORDER BY`` clause element.
e.g.::
from sqlalchemy import desc
stmt = select([users_table]).order_by(desc(users_table.c.name))
will produce SQL as::
SELECT id, name FROM user ORDER BY name DESC
The :func:`.desc` function is a standalone version of the
:meth:`.ColumnElement.desc` method available on all SQL expressions,
e.g.::
stmt = select([users_table]).order_by(users_table.c.name.desc())
:param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
with which to apply the :func:`.desc` operation.
.. seealso::
:func:`.asc`
:func:`.nullsfirst`
:func:`.nullslast`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.desc_op,
wraps_column_expression=False)
@classmethod
def _create_asc(cls, column):
"""Produce an ascending ``ORDER BY`` clause element.
e.g.::
from sqlalchemy import asc
stmt = select([users_table]).order_by(asc(users_table.c.name))
will produce SQL as::
SELECT id, name FROM user ORDER BY name ASC
The :func:`.asc` function is a standalone version of the
:meth:`.ColumnElement.asc` method available on all SQL expressions,
e.g.::
stmt = select([users_table]).order_by(users_table.c.name.asc())
:param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
with which to apply the :func:`.asc` operation.
.. seealso::
:func:`.desc`
:func:`.nullsfirst`
:func:`.nullslast`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_label_reference(column),
modifier=operators.asc_op,
wraps_column_expression=False)
@classmethod
def _create_distinct(cls, expr):
"""Produce an column-expression-level unary ``DISTINCT`` clause.
This applies the ``DISTINCT`` keyword to an individual column
expression, and is typically contained within an aggregate function,
as in::
from sqlalchemy import distinct, func
stmt = select([func.count(distinct(users_table.c.name))])
The above would produce an expression resembling::
SELECT COUNT(DISTINCT name) FROM user
The :func:`.distinct` function is also available as a column-level
method, e.g. :meth:`.ColumnElement.distinct`, as in::
stmt = select([func.count(users_table.c.name.distinct())])
The :func:`.distinct` operator is different from the
:meth:`.Select.distinct` method of :class:`.Select`,
which produces a ``SELECT`` statement
with ``DISTINCT`` applied to the result set as a whole,
e.g. a ``SELECT DISTINCT`` expression. See that method for further
information.
.. seealso::
:meth:`.ColumnElement.distinct`
:meth:`.Select.distinct`
:data:`.func`
"""
expr = _literal_as_binds(expr)
return UnaryExpression(
expr, operator=operators.distinct_op,
type_=expr.type, wraps_column_expression=False)
@property
def _order_by_label_element(self):
if self.modifier in (operators.desc_op, operators.asc_op):
return self.element._order_by_label_element
else:
return None
@property
def _from_objects(self):
return self.element._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return self.element,
def compare(self, other, **kw):
"""Compare this :class:`UnaryExpression` against the given
:class:`.ClauseElement`."""
return (
isinstance(other, UnaryExpression) and
self.operator == other.operator and
self.modifier == other.modifier and
self.element.compare(other.element, **kw)
)
def _negate(self):
if self.negate is not None:
return UnaryExpression(
self.element,
operator=self.negate,
negate=self.operator,
modifier=self.modifier,
type_=self.type,
wraps_column_expression=self.wraps_column_expression)
else:
return ClauseElement._negate(self)
def self_group(self, against=None):
if self.operator and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
class AsBoolean(UnaryExpression):
def __init__(self, element, operator, negate):
self.element = element
self.type = type_api.BOOLEANTYPE
self.operator = operator
self.negate = negate
self.modifier = None
self.wraps_column_expression = True
def self_group(self, against=None):
return self
def _negate(self):
return self.element._negate()
class BinaryExpression(ColumnElement):
"""Represent an expression that is ``LEFT <operator> RIGHT``.
A :class:`.BinaryExpression` is generated automatically
whenever two column expressions are used in a Python binary expression::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print column('a') + column('b')
a + b
"""
__visit_name__ = 'binary'
def __init__(self, left, right, operator, type_=None,
negate=None, modifiers=None):
# allow compatibility with libraries that
# refer to BinaryExpression directly and pass strings
if isinstance(operator, util.string_types):
operator = operators.custom_op(operator)
self._orig = (left, right)
self.left = left.self_group(against=operator)
self.right = right.self_group(against=operator)
self.operator = operator
self.type = type_api.to_instance(type_)
self.negate = negate
if modifiers is None:
self.modifiers = {}
else:
self.modifiers = modifiers
def __bool__(self):
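# Only == and != comparisons between clause constructs have a defined
# Python truth value; it is computed by comparing the hashes of the
# original (pre-self_group) operands stored in self._orig.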
if self.operator in (operator.eq, operator.ne):
return self.operator(hash(self._orig[0]), hash(self._orig[1]))
else:
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
@property
def is_comparison(self):
return operators.is_comparison(self.operator)
@property
def _from_objects(self):
return self.left._from_objects + self.right._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.left = clone(self.left, **kw)
self.right = clone(self.right, **kw)
def get_children(self, **kwargs):
return self.left, self.right
def compare(self, other, **kw):
"""Compare this :class:`BinaryExpression` against the
given :class:`BinaryExpression`."""
return (
isinstance(other, BinaryExpression) and
self.operator == other.operator and
(
self.left.compare(other.left, **kw) and
self.right.compare(other.right, **kw) or
(
operators.is_commutative(self.operator) and
self.left.compare(other.right, **kw) and
self.right.compare(other.left, **kw)
)
)
)
def self_group(self, against=None):
if operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def _negate(self):
if self.negate is not None:
return BinaryExpression(
self.left,
self.right,
self.negate,
negate=self.operator,
type_=self.type,
modifiers=self.modifiers)
else:
return super(BinaryExpression, self)._negate()
class Grouping(ColumnElement):
"""Represent a grouping within a column expression"""
__visit_name__ = 'grouping'
def __init__(self, element):
self.element = element
self.type = getattr(element, 'type', type_api.NULLTYPE)
def self_group(self, against=None):
return self
@property
def _key_label(self):
return self._label
@property
def _label(self):
return getattr(self.element, '_label', None) or self.anon_label
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return self.element,
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {'element': self.element, 'type': self.type}
def __setstate__(self, state):
self.element = state['element']
self.type = state['type']
def compare(self, other, **kw):
return isinstance(other, Grouping) and \
self.element.compare(other.element)
class Over(ColumnElement):
"""Represent an OVER clause.
This is a special operator against a so-called
"window" function, as well as any aggregate function,
which produces results relative to the result set
itself. It's supported only by certain database
backends.
"""
__visit_name__ = 'over'
order_by = None
partition_by = None
def __init__(self, func, partition_by=None, order_by=None):
"""Produce an :class:`.Over` object against a function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
E.g.::
from sqlalchemy import over
over(func.row_number(), order_by='x')
Would produce "ROW_NUMBER() OVER(ORDER BY x)".
:param func: a :class:`.FunctionElement` construct, typically
generated by :data:`~.expression.func`.
:param partition_by: a column element or string, or a list
of such, that will be used as the PARTITION BY clause
of the OVER construct.
:param order_by: a column element or string, or a list
of such, that will be used as the ORDER BY clause
of the OVER construct.
This function is also available from the :data:`~.expression.func`
construct itself via the :meth:`.FunctionElement.over` method.
.. versionadded:: 0.7
"""
self.func = func
if order_by is not None:
self.order_by = ClauseList(
*util.to_list(order_by),
_literal_as_text=_literal_as_label_reference)
if partition_by is not None:
self.partition_by = ClauseList(
*util.to_list(partition_by),
_literal_as_text=_literal_as_label_reference)
@util.memoized_property
def type(self):
return self.func.type
def get_children(self, **kwargs):
return [c for c in
(self.func, self.partition_by, self.order_by)
if c is not None]
def _copy_internals(self, clone=_clone, **kw):
self.func = clone(self.func, **kw)
if self.partition_by is not None:
self.partition_by = clone(self.partition_by, **kw)
if self.order_by is not None:
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return list(itertools.chain(
*[c._from_objects for c in
(self.func, self.partition_by, self.order_by)
if c is not None]
))
class FunctionFilter(ColumnElement):
"""Represent a function FILTER clause.
This is a special operator against aggregate and window functions,
which controls which rows are passed to it.
It's supported only by certain database backends.
Invocation of :class:`.FunctionFilter` is via
:meth:`.FunctionElement.filter`::
func.count(1).filter(True)
.. versionadded:: 1.0.0
.. seealso::
:meth:`.FunctionElement.filter`
"""
__visit_name__ = 'funcfilter'
criterion = None
def __init__(self, func, *criterion):
"""Produce a :class:`.FunctionFilter` object against a function.
Used against aggregate and window functions,
for database backends that support the "FILTER" clause.
E.g.::
from sqlalchemy import funcfilter
funcfilter(func.count(1), MyClass.name == 'some name')
Would produce "COUNT(1) FILTER (WHERE myclass.name = 'some name')".
This function is also available from the :data:`~.expression.func`
construct itself via the :meth:`.FunctionElement.filter` method.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.FunctionElement.filter`
"""
self.func = func
self.filter(*criterion)
def filter(self, *criterion):
"""Produce an additional FILTER against the function.
This method adds additional criteria to the initial criteria
set up by :meth:`.FunctionElement.filter`.
Multiple criteria are joined together at SQL render time
via ``AND``.
"""
for criterion in list(criterion):
criterion = _expression_literal_as_text(criterion)
if self.criterion is not None:
self.criterion = self.criterion & criterion
else:
self.criterion = criterion
return self
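# Illustrative rendering (a sketch based on the docstring above; the column
# names are hypothetical): func.count(1).filter(x > 5).filter(y < 10)
# would emit COUNT(1) FILTER (WHERE x > 5 AND y < 10), since successive
# criteria are joined with AND.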
def over(self, partition_by=None, order_by=None):
"""Produce an OVER clause against this filtered function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
The expression::
func.rank().filter(MyClass.y > 5).over(order_by='x')
is shorthand for::
from sqlalchemy import over, funcfilter
over(funcfilter(func.rank(), MyClass.y > 5), order_by='x')
See :func:`~.expression.over` for a full description.
"""
return Over(self, partition_by=partition_by, order_by=order_by)
@util.memoized_property
def type(self):
return self.func.type
def get_children(self, **kwargs):
return [c for c in
(self.func, self.criterion)
if c is not None]
def _copy_internals(self, clone=_clone, **kw):
self.func = clone(self.func, **kw)
if self.criterion is not None:
self.criterion = clone(self.criterion, **kw)
@property
def _from_objects(self):
return list(itertools.chain(
*[c._from_objects for c in (self.func, self.criterion)
if c is not None]
))
class Label(ColumnElement):
"""Represents a column label (AS).
Represent a label, as typically applied to any column-level
element using the ``AS`` sql keyword.
"""
__visit_name__ = 'label'
def __init__(self, name, element, type_=None):
"""Return a :class:`Label` object for the
given :class:`.ColumnElement`.
A label changes the name of an element in the columns clause of a
``SELECT`` statement, typically via the ``AS`` SQL keyword.
This functionality is more conveniently available via the
:meth:`.ColumnElement.label` method on :class:`.ColumnElement`.
:param name: label name
:param element: a :class:`.ColumnElement`.
"""
if isinstance(element, Label):
self._resolve_label = element._label
while isinstance(element, Label):
element = element.element
if name:
self.name = name
self._resolve_label = self.name
else:
self.name = _anonymous_label(
'%%(%d %s)s' % (id(self), getattr(element, 'name', 'anon'))
)
self.key = self._label = self._key_label = self.name
self._element = element
self._type = type_
self._proxies = [element]
def __reduce__(self):
return self.__class__, (self.name, self._element, self._type)
@util.memoized_property
def _allow_label_resolve(self):
return self.element._allow_label_resolve
@property
def _order_by_label_element(self):
return self
@util.memoized_property
def type(self):
return type_api.to_instance(
self._type or getattr(self._element, 'type', None)
)
@util.memoized_property
def element(self):
return self._element.self_group(against=operators.as_)
def self_group(self, against=None):
sub_element = self._element.self_group(against=against)
if sub_element is not self._element:
return Label(self.name,
sub_element,
type_=self._type)
else:
return self
@property
def primary_key(self):
return self.element.primary_key
@property
def foreign_keys(self):
return self.element.foreign_keys
def get_children(self, **kwargs):
return self.element,
def _copy_internals(self, clone=_clone, anonymize_labels=False, **kw):
self.element = clone(self.element, **kw)
self.__dict__.pop('_allow_label_resolve', None)
if anonymize_labels:
self.name = self._resolve_label = _anonymous_label(
'%%(%d %s)s' % (
id(self), getattr(self.element, 'name', 'anon'))
)
self.key = self._label = self._key_label = self.name
@property
def _from_objects(self):
return self.element._from_objects
def _make_proxy(self, selectable, name=None, **kw):
e = self.element._make_proxy(selectable,
name=name if name else self.name)
e._proxies.append(self)
if self._type is not None:
e.type = self._type
return e
class ColumnClause(Immutable, ColumnElement):
"""Represents a column expression from any textual string.
The :class:`.ColumnClause`, a lightweight analogue to the
:class:`.Column` class, is typically invoked using the
:func:`.column` function, as in::
from sqlalchemy import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The above statement would produce SQL like::
SELECT id, name FROM user
:class:`.ColumnClause` is the immediate superclass of the schema-specific
:class:`.Column` object. While the :class:`.Column` class has all the
same capabilities as :class:`.ColumnClause`, the :class:`.ColumnClause`
class is usable by itself in those cases where behavioral requirements
are limited to simple SQL expression generation. The object has none of
the associations with schema-level metadata or with execution-time
behavior that :class:`.Column` does, so in that sense is a "lightweight"
version of :class:`.Column`.
Full details on :class:`.ColumnClause` usage are at :func:`.column`.
.. seealso::
:func:`.column`
:class:`.Column`
"""
__visit_name__ = 'column'
onupdate = default = server_default = server_onupdate = None
_memoized_property = util.group_expirable_memoized_property()
def __init__(self, text, type_=None, is_literal=False, _selectable=None):
"""Produce a :class:`.ColumnClause` object.
The :class:`.ColumnClause` is a lightweight analogue to the
:class:`.Column` class. The :func:`.column` function can
be invoked with just a name alone, as in::
from sqlalchemy import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The above statement would produce SQL like::
SELECT id, name FROM user
Once constructed, :func:`.column` may be used like any other SQL
expression element such as within :func:`.select` constructs::
from sqlalchemy.sql import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The text handled by :func:`.column` is assumed to be handled
like the name of a database column; if the string contains mixed case,
special characters, or matches a known reserved word on the target
backend, the column expression will render using the quoting
behavior determined by the backend. To produce a textual SQL
expression that is rendered exactly without any quoting,
use :func:`.literal_column` instead, or pass ``True`` as the
value of :paramref:`.column.is_literal`. Additionally, full SQL
statements are best handled using the :func:`.text` construct.
:func:`.column` can be used in a table-like
fashion by combining it with the :func:`.table` function
(which is the lightweight analogue to :class:`.Table`) to produce
a working table construct with minimal boilerplate::
from sqlalchemy import table, column, select
user = table("user",
column("id"),
column("name"),
column("description"),
)
stmt = select([user.c.description]).where(user.c.name == 'wendy')
A :func:`.column` / :func:`.table` construct like that illustrated
above can be created in an
ad-hoc fashion and is not associated with any
:class:`.schema.MetaData`, DDL, or events, unlike its
:class:`.Table` counterpart.
.. versionchanged:: 1.0.0 :func:`.expression.column` can now
be imported from the plain ``sqlalchemy`` namespace like any
other SQL element.
:param text: the text of the element.
:param type: :class:`.types.TypeEngine` object which can associate
this :class:`.ColumnClause` with a type.
:param is_literal: if True, the :class:`.ColumnClause` is assumed to
be an exact expression that will be delivered to the output with no
quoting rules applied regardless of case-sensitive settings. The
:func:`.literal_column()` function essentially invokes
:func:`.column` while passing ``is_literal=True``.
.. seealso::
:class:`.Column`
:func:`.literal_column`
:func:`.table`
:func:`.text`
:ref:`sqlexpression_literal_column`
"""
self.key = self.name = text
self.table = _selectable
self.type = type_api.to_instance(type_)
self.is_literal = is_literal
def _compare_name_for_result(self, other):
if self.is_literal or \
self.table is None or self.table._textual or \
not hasattr(other, 'proxy_set') or (
isinstance(other, ColumnClause) and
(other.is_literal or
other.table is None or
other.table._textual)
):
return (hasattr(other, 'name') and self.name == other.name) or \
(hasattr(other, '_label') and self._label == other._label)
else:
return other.proxy_set.intersection(self.proxy_set)
def _get_table(self):
return self.__dict__['table']
def _set_table(self, table):
self._memoized_property.expire_instance(self)
self.__dict__['table'] = table
table = property(_get_table, _set_table)
@_memoized_property
def _from_objects(self):
t = self.table
if t is not None:
return [t]
else:
return []
@util.memoized_property
def description(self):
if util.py3k:
return self.name
else:
return self.name.encode('ascii', 'backslashreplace')
@_memoized_property
def _key_label(self):
if self.key != self.name:
return self._gen_label(self.key)
else:
return self._label
@_memoized_property
def _label(self):
return self._gen_label(self.name)
@_memoized_property
def _render_label_in_columns_clause(self):
return self.table is not None
def _gen_label(self, name):
t = self.table
if self.is_literal:
return None
elif t is not None and t.named_with_column:
if getattr(t, 'schema', None):
label = t.schema.replace('.', '_') + "_" + \
t.name + "_" + name
else:
label = t.name + "_" + name
# propagate name quoting rules for labels.
if getattr(name, "quote", None) is not None:
if isinstance(label, quoted_name):
label.quote = name.quote
else:
label = quoted_name(label, name.quote)
elif getattr(t.name, "quote", None) is not None:
# can't get this situation to occur, so let's
# assert false on it for now
assert not isinstance(label, quoted_name)
label = quoted_name(label, t.name.quote)
# ensure the label name doesn't conflict with that
# of an existing column
if label in t.c:
_label = label
counter = 1
while _label in t.c:
_label = label + "_" + str(counter)
counter += 1
label = _label
return _as_truncated(label)
else:
return name
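# Illustrative labels produced by _gen_label above (a sketch; the names
# are hypothetical): column "name" on table "user" yields "user_name";
# with a schema "s" it yields "s_user_name"; is_literal columns yield
# no label at all (None).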
def _bind_param(self, operator, obj):
return BindParameter(self.key, obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
unique=True)
def _make_proxy(self, selectable, name=None, attach=True,
name_is_truncatable=False, **kw):
# propagate the "is_literal" flag only if we are keeping our name,
# otherwise it's considered to be a label
is_literal = self.is_literal and (name is None or name == self.name)
c = self._constructor(
_as_truncated(name or self.name) if
name_is_truncatable else
(name or self.name),
type_=self.type,
_selectable=selectable,
is_literal=is_literal
)
if name is None:
c.key = self.key
c._proxies = [self]
if selectable._is_clone_of is not None:
c._is_clone_of = \
selectable._is_clone_of.columns.get(c.key)
if attach:
selectable._columns[c.key] = c
return c
class _IdentifiedClause(Executable, ClauseElement):
__visit_name__ = 'identified'
_execution_options = \
Executable._execution_options.union({'autocommit': False})
def __init__(self, ident):
self.ident = ident
class SavepointClause(_IdentifiedClause):
__visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
__visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
__visit_name__ = 'release_savepoint'
class quoted_name(util.MemoizedSlots, util.text_type):
"""Represent a SQL identifier combined with quoting preferences.
:class:`.quoted_name` is a Python unicode/str subclass which
represents a particular identifier name along with a
``quote`` flag. This ``quote`` flag, when set to
``True`` or ``False``, overrides automatic quoting behavior
for this identifier in order to either unconditionally quote
or to not quote the name. If left at its default of ``None``,
quoting behavior is applied to the identifier on a per-backend basis
based on an examination of the token itself.
A :class:`.quoted_name` object with ``quote=True`` is also
prevented from being modified in the case of a so-called
"name normalize" option. Certain database backends, such as
Oracle, Firebird, and DB2 "normalize" case-insensitive names
as uppercase. The SQLAlchemy dialects for these backends
convert from SQLAlchemy's lower-case-means-insensitive convention
to the upper-case-means-insensitive conventions of those backends.
The ``quote=True`` flag here will prevent this conversion from occurring
to support an identifier that's quoted as all lower case against
such a backend.
The :class:`.quoted_name` object is normally created automatically
when specifying the name for key schema constructs such as
:class:`.Table`, :class:`.Column`, and others. The class can also be
passed explicitly as the name to any function that receives a name which
can be quoted, such as when using the :meth:`.Engine.has_table` method with
an unconditionally quoted name::
from sqlalchemy import create_engine
from sqlalchemy.sql.elements import quoted_name
engine = create_engine("oracle+cx_oracle://some_dsn")
engine.has_table(quoted_name("some_table", True))
The above logic will run the "has table" logic against the Oracle backend,
passing the name exactly as ``"some_table"`` without converting to
upper case.
.. versionadded:: 0.9.0
"""
__slots__ = 'quote', 'lower', 'upper'
def __new__(cls, value, quote):
if value is None:
return None
# experimental - don't bother with quoted_name
# if quote flag is None. doesn't seem to make any dent
# in performance however
# elif not sprcls and quote is None:
# return value
elif isinstance(value, cls) and (
quote is None or value.quote == quote
):
return value
self = super(quoted_name, cls).__new__(cls, value)
self.quote = quote
return self
def __reduce__(self):
return quoted_name, (util.text_type(self), self.quote)
def _memoized_method_lower(self):
if self.quote:
return self
else:
return util.text_type(self).lower()
def _memoized_method_upper(self):
if self.quote:
return self
else:
return util.text_type(self).upper()
def __repr__(self):
backslashed = self.encode('ascii', 'backslashreplace')
if not util.py2k:
backslashed = backslashed.decode('ascii')
return "'%s'" % backslashed
class _truncated_label(quoted_name):
"""A unicode subclass used to identify symbolic "
"names that may require truncation."""
__slots__ = ()
def __new__(cls, value, quote=None):
quote = getattr(value, "quote", quote)
# return super(_truncated_label, cls).__new__(cls, value, quote, True)
return super(_truncated_label, cls).__new__(cls, value, quote)
def __reduce__(self):
return self.__class__, (util.text_type(self), self.quote)
def apply_map(self, map_):
return self
class conv(_truncated_label):
"""Mark a string indicating that a name has already been converted
by a naming convention.
This is a string subclass that indicates a name that should not be
subject to any further naming conventions.
E.g. when we create a :class:`.Constraint` using a naming convention
as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name='x5'))
The name of the above constraint will be rendered as ``"ck_t_x5"``.
That is, the existing name ``x5`` is used in the naming convention as the
``constraint_name`` token.
In some situations, such as in migration scripts, we may be rendering
the above :class:`.CheckConstraint` with a name that's already been
converted. In order to make sure the name isn't double-modified, the
new name is applied using the :func:`.schema.conv` marker. We can
use this explicitly as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name=conv('ck_t_x5')))
Where above, the :func:`.schema.conv` marker indicates that the constraint
name here is final, and the name will render as ``"ck_t_x5"`` and not
``"ck_t_ck_t_x5"``
.. versionadded:: 0.9.4
.. seealso::
:ref:`constraint_naming_conventions`
"""
__slots__ = ()
class _defer_name(_truncated_label):
"""mark a name as 'deferred' for the purposes of automated name
generation.
"""
__slots__ = ()
def __new__(cls, value):
if value is None:
return _NONE_NAME
elif isinstance(value, conv):
return value
else:
return super(_defer_name, cls).__new__(cls, value)
def __reduce__(self):
return self.__class__, (util.text_type(self), )
class _defer_none_name(_defer_name):
"""indicate a 'deferred' name that was ultimately the value None."""
__slots__ = ()
_NONE_NAME = _defer_none_name("_unnamed_")
# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
"""A unicode subclass used to identify anonymously
generated names."""
__slots__ = ()
def __add__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(self, util.text_type(other)),
self.quote)
)
def __radd__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(util.text_type(other), self),
self.quote)
)
def apply_map(self, map_):
if self.quote is not None:
# preserve quoting only if necessary
return quoted_name(self % map_, self.quote)
else:
# else skip the constructor call
return self % map_
def _as_truncated(value):
"""coerce the given value to :class:`._truncated_label`.
Existing :class:`._truncated_label` and
:class:`._anonymous_label` objects are passed
unchanged.
"""
if isinstance(value, _truncated_label):
return value
else:
return _truncated_label(value)
def _string_or_unprintable(element):
if isinstance(element, util.string_types):
return element
else:
try:
return str(element)
except Exception:
return "unprintable element %r" % element
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
"""return the intersection of sets a and b, counting
any overlap between 'cloned' predecessors.
The returned set is in terms of the entities present within 'a'.
"""
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if all_overlap.intersection(elem._cloned_set))
def _cloned_difference(a, b):
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if not all_overlap.intersection(elem._cloned_set))
def _labeled(element):
if not hasattr(element, 'name'):
return element.label(None)
else:
return element
def _is_column(col):
"""True if ``col`` is an instance of :class:`.ColumnElement`."""
return isinstance(col, ColumnElement)
def _find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
traverse(clause, {}, {'column': cols.add})
return cols
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__(). It is only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
if isinstance(element, util.string_types):
return element
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
try:
return element.key
except AttributeError:
return None
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_label_reference(element):
if isinstance(element, util.string_types):
return _textual_label_reference(element)
elif hasattr(element, '__clause_element__'):
element = element.__clause_element__()
return _literal_as_text(element)
def _literal_and_labels_as_label_reference(element):
if isinstance(element, util.string_types):
return _textual_label_reference(element)
elif hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if isinstance(element, ColumnElement) and \
element._order_by_label_element is not None:
return _label_reference(element)
else:
return _literal_as_text(element)
def _expression_literal_as_text(element):
return _literal_as_text(element, warn=True)
def _literal_as_text(element, warn=False):
if isinstance(element, Visitable):
return element
elif hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif isinstance(element, util.string_types):
if warn:
util.warn_limited(
"Textual SQL expression %(expr)r should be "
"explicitly declared as text(%(expr)r)",
{"expr": util.ellipses_string(element)})
return TextClause(util.text_type(element))
elif isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
else:
raise exc.ArgumentError(
"SQL expression object or string expected, got object of type %r "
"instead" % type(element)
)
def _no_literals(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' "
"function to indicate a SQL expression "
"literal, or 'literal()' to indicate a "
"bound value." % element)
else:
return element
def _is_literal(element):
return not isinstance(element, Visitable) and \
not hasattr(element, '__clause_element__')
def _only_column_elements_or_none(element, name):
if element is None:
return None
else:
return _only_column_elements(element, name)
def _only_column_elements(element, name):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if not isinstance(element, ColumnElement):
raise exc.ArgumentError(
"Column-based expression object expected for argument "
"'%s'; got: '%s', type %s" % (name, element, type(element)))
return element
def _literal_as_binds(element, name=None, type_=None):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
if element is None:
return Null()
else:
return BindParameter(name, element, type_=type_, unique=True)
else:
return element
_guess_straight_column = re.compile(r'^\w\S*$', re.I)
def _interpret_as_column_or_from(element):
if isinstance(element, Visitable):
return element
elif hasattr(element, '__clause_element__'):
return element.__clause_element__()
insp = inspection.inspect(element, raiseerr=False)
if insp is None:
if isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
elif hasattr(insp, "selectable"):
return insp.selectable
# be forgiving as this is an extremely common
# and known expression
if element == "*":
guess_is_literal = True
elif isinstance(element, (numbers.Number)):
return ColumnClause(str(element), is_literal=True)
else:
element = str(element)
# give in to temptation, as this fact we are guessing about
# is not one we've previously ever needed our users to tell us;
# but let them know we are not happy about it
guess_is_literal = not _guess_straight_column.match(element)
util.warn_limited(
"Textual column expression %(column)r should be "
"explicitly declared with text(%(column)r), "
"or use %(literal_column)s(%(column)r) "
"for more specificity",
{
"column": util.ellipses_string(element),
"literal_column": "literal_column"
if guess_is_literal else "column"
})
return ColumnClause(
element,
is_literal=guess_is_literal)
def _const_expr(element):
if isinstance(element, (Null, False_, True_)):
return element
elif element is None:
return Null()
elif element is False:
return False_()
elif element is True:
return True_()
else:
raise exc.ArgumentError(
"Expected None, False, or True"
)
def _type_from_args(args):
for a in args:
if not a.type._isnull:
return a.type
else:
return type_api.NULLTYPE
def _corresponding_column_or_error(fromclause, column,
require_embedded=False):
c = fromclause.corresponding_column(column,
require_embedded=require_embedded)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
%
(column,
getattr(column, 'table', None),
fromclause.description)
)
return c
class AnnotatedColumnElement(Annotated):
def __init__(self, element, values):
Annotated.__init__(self, element, values)
ColumnElement.comparator._reset(self)
for attr in ('name', 'key', 'table'):
if self.__dict__.get(attr, False) is None:
self.__dict__.pop(attr)
def _with_annotations(self, values):
clone = super(AnnotatedColumnElement, self)._with_annotations(values)
ColumnElement.comparator._reset(clone)
return clone
@util.memoized_property
def name(self):
"""pull 'name' from parent, if not present"""
return self._Annotated__element.name
@util.memoized_property
def table(self):
"""pull 'table' from parent, if not present"""
return self._Annotated__element.table
@util.memoized_property
def key(self):
"""pull 'key' from parent, if not present"""
return self._Annotated__element.key
@util.memoized_property
def info(self):
return self._Annotated__element.info
@util.memoized_property
def anon_label(self):
return self._Annotated__element.anon_label
| sunze/py_flask | venv/lib/python3.4/site-packages/sqlalchemy/sql/elements.py | Python | mit | 132,392 | ["VisIt"] | 2652b0acbf34dd55673087fe90b6ec133d769e11f7da1f4d135b49494861ac48 |
# coding=utf-8
# main codes, call functions at stokes_flow.py
# Zhang Ji, 20170518
import sys
import petsc4py
petsc4py.init(sys.argv)
import numpy as np
from src.stokes_flow import problem_dic, obj_dic
import src.stokes_flow as sf
from petsc4py import PETSc
from src.geo import *
import pickle
# def print_case_info(**problem_kwargs):
# comm = PETSc.COMM_WORLD.tompi4py()
# rank = comm.Get_rank()
# size = comm.Get_size()
#
# fileHandle = problem_kwargs['fileHandle']
# radius = problem_kwargs['radius']
# deltaLength = problem_kwargs['deltaLength']
# matrix_method = problem_kwargs['matrix_method']
# u = problem_kwargs['u']
#
# PETSc.Sys.Print('sphere radius: %f, delta length: %f, velocity: %f' % (radius, deltaLength, u))
#
# err_msg = "Only 'pf', 'rs', 'tp_rs', and 'lg_rs' methods are accept for this main code. "
# assert matrix_method in ('rs', 'tp_rs', 'lg_rs', 'rs_precondition', 'tp_rs_precondition', 'lg_rs_precondition', 'pf'), err_msg
# epsilon = problem_kwargs['epsilon']
# if matrix_method in ('rs', 'rs_precondition', 'pf'):
# PETSc.Sys.Print('create matrix method: %s, epsilon: %f'
# % (matrix_method, epsilon))
# elif matrix_method in ('tp_rs', 'tp_rs_precondition'):
# twoPara_n = problem_kwargs['twoPara_n']
# PETSc.Sys.Print('create matrix method: %s, epsilon: %f, order: %d'
# % (matrix_method, epsilon, twoPara_n))
# elif matrix_method in ('lg_rs', 'lg_rs_precondition'):
# legendre_m = problem_kwargs['legendre_m']
# legendre_k = problem_kwargs['legendre_k']
# PETSc.Sys.Print('create matrix method: %s, epsilon: %f, m: %d, k: %d, p: %d'
# % (matrix_method, epsilon, legendre_m, legendre_k, (legendre_m + 2 * legendre_k + 1)))
#
# solve_method = problem_kwargs['solve_method']
# precondition_method = problem_kwargs['precondition_method']
# PETSc.Sys.Print('solve method: %s, precondition method: %s'
# % (solve_method, precondition_method))
# PETSc.Sys.Print('output file handle: ' + fileHandle)
# PETSc.Sys.Print('MPI size: %d' % size)
def get_problem_kwargs(**main_kwargs):
OptDB = PETSc.Options()
radius = OptDB.getReal('r', 1)
n = OptDB.getInt('n', 200)
deltaLength = np.sqrt(4 * np.pi * radius * radius / n)
epsilon = OptDB.getReal('e', -1)
rel_omega = OptDB.getReal('rel_omega', 1)
rel_u = OptDB.getReal('rel_u', 1)
fileHandle = OptDB.getString('f', 'try_forcefree')
solve_method = OptDB.getString('s', 'gmres')
precondition_method = OptDB.getString('g', 'none')
plot = OptDB.getBool('plot', False)
matrix_method = OptDB.getString('sm', 'pf')
restart = OptDB.getBool('restart', False)
twoPara_n = OptDB.getInt('tp_n', 1)
legendre_m = OptDB.getInt('legendre_m', 3)
legendre_k = OptDB.getInt('legendre_k', 2)
n_sphere_check = OptDB.getInt('n_sphere_check', 2000)
n_node_threshold = OptDB.getInt('n_threshold', 10000)
random_velocity = OptDB.getBool('random_velocity', False)
getConvergenceHistory = OptDB.getBool('getConvergenceHistory', False)
pickProblem = OptDB.getBool('pickProblem', False)
n_obj = OptDB.getInt('n', 1)
n_obj_x = OptDB.getInt('nx', n_obj)
n_obj_y = OptDB.getInt('ny', n_obj)
distance = OptDB.getReal('dist', 3)
distance_x = OptDB.getReal('distx', distance)
distance_y = OptDB.getReal('disty', distance)
move_delta = np.array([distance_x, distance_y, 1])
# field_range: describe a sector area.
field_range = np.array([[-3, -3, -3], [n_obj_x - 1, n_obj_y - 1, 0] * move_delta + [3, 3, 3]])
n_grid = np.array([n_obj_x, n_obj_y, 1]) * 20
problem_kwargs = {
'name': 'spherePrb',
'matrix_method': matrix_method,
'deltaLength': deltaLength,
'epsilon': epsilon,
'delta': deltaLength * epsilon, # for rs method
'solve_method': solve_method,
'precondition_method': precondition_method,
'field_range': field_range,
'n_grid': n_grid,
'plot': plot,
'fileHandle': fileHandle,
'region_type': 'rectangle',
'twoPara_n': twoPara_n,
'legendre_m': legendre_m,
'legendre_k': legendre_k,
'radius': radius,
'rel_omega': rel_omega,
'rel_u': rel_u,
'random_velocity': random_velocity,
'n_obj_x': n_obj_x,
'n_obj_y': n_obj_y,
'move_delta': move_delta,
'restart': restart,
'n_sphere_check': n_sphere_check,
'n_node_threshold': n_node_threshold,
'getConvergenceHistory': getConvergenceHistory,
'pickProblem': pickProblem
}
for key in main_kwargs:
problem_kwargs[key] = main_kwargs[key]
return problem_kwargs
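# Illustrative invocation (a sketch; the option names mirror the OptDB
# lookups above, the values are hypothetical):
#   python try_forcefreeinpipe.py -r 1 -n 200 -sm pf -s gmres -f try_forcefree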
def main_fun(**main_kwargs):
problem_kwargs = get_problem_kwargs(**main_kwargs)
# print_case_info(**problem_kwargs)
fileHandle = problem_kwargs['fileHandle']
radius = problem_kwargs['radius']
deltaLength = problem_kwargs['deltaLength']
matrix_method = problem_kwargs['matrix_method']
n_obj_x = problem_kwargs['n_obj_x']
n_obj_y = problem_kwargs['n_obj_y']
move_delta = problem_kwargs['move_delta']
random_velocity = problem_kwargs['random_velocity']
getConvergenceHistory = problem_kwargs['getConvergenceHistory']
pickProblem = problem_kwargs['pickProblem']
epsilon = problem_kwargs['epsilon']
rel_u = problem_kwargs['rel_u']
rel_omega = problem_kwargs['rel_omega']
norm_rel_u = np.array((1, 1, 1))
norm_rel_omega = np.array((0, 0, 0))
rel_U = np.hstack((rel_u * norm_rel_u, rel_omega * norm_rel_omega))
center = np.zeros(3)
n = int(16 * radius * radius / deltaLength / deltaLength)
sphere_geo0 = sphere_geo() # force geo
sphere_geo0.create_n(n, radius)
sphere_geo0.node_rotation(theta=np.pi / 5)
sphere_geo1 = sphere_geo0.copy()
if matrix_method in ('pf',):
sphere_geo1.create_n(n, radius + deltaLength * epsilon)
obj_sphere = obj_dic[matrix_method]()
obj_sphere_kwargs = {'name': 'sphereObj_0_0'}
obj_sphere.set_data(sphere_geo1, sphere_geo0, **obj_sphere_kwargs)
name = 'obj_composite1'
obj_composite = sf.ForceFreeComposite(center, name)
obj_composite.add_obj(obj_sphere, rel_U=rel_U)
problem = sf.StokesletsInPipeforcefreeProblem(**problem_kwargs)
problem.add_obj(obj_composite)
problem.create_matrix()
problem.print_info()
problem.solve()
problem.pickmyself(fileHandle)
with open(fileHandle + '_pick.bin', 'rb') as input:
unpick = pickle.Unpickler(input)
problem = unpick.load()
problem.unpick_myself()
problem.create_matrix()
problem.print_info()
problem.solve()
problem.pickmyself(fileHandle)
PETSc.Sys.Print(obj_composite.get_ref_U())
PETSc.Sys.Print(obj_composite.get_total_force())
problem.vtk_obj(fileHandle)
geo_check = sphere_geo() # force geo
geo_check.create_n(n * 2, radius)
geo_check.set_rigid_velocity(rel_U + obj_composite.get_ref_U())
obj_check = obj_dic[matrix_method]()
obj_check.set_data(geo_check, geo_check, **obj_sphere_kwargs)
problem.vtk_check(fileHandle + '_check', obj_check)
problem.vtk_self(fileHandle)
obj_sphere.vtk(fileHandle)
force_sphere = obj_sphere.get_force_x()
PETSc.Sys().Print('---->>>%s: Resultant at x axis is %f' % (str(problem), force_sphere.sum()/(6*np.pi*radius)))
return True
if __name__ == '__main__':
main_fun()
| pcmagic/stokes_flow | try_code/try_forcefreeinpipe.py | Python | mit | 7,866 | ["VTK"] | 400eef829b45a41efed7573f1da8ded3ef3bc482934b174bfc905adb1473bbbb |
#!/usr/bin/env python
import pylab as pyl
from mpl_toolkits.axes_grid1 import AxesGrid
import cPickle as pickle
from colsort import colsort
def plot_uvj_vs_icd():
galaxies = pickle.load(open('galaxies.pickle','rb'))
galaxies = filter(lambda galaxy: galaxy.ICD_IH is not None, galaxies)
galaxies = filter(lambda galaxy: galaxy.sersic is not None and \
galaxy.ston_I > 30, galaxies)
#Upper and Lower limit arrow verts
arrowup_verts = [[0.,0.], [-1., -1], [0.,0.],
[0.,-2.], [0.,0.], [1,-1]]
#arrowdown_verts = [[0.,0.], [-1., 1], [0.,0.],
# [0.,2.], [0.,0.], [1, 1]]
F = pyl.figure(1,figsize=(8,3))
grid = AxesGrid(F, 111,
nrows_ncols=(1,4),
axes_pad = 0.1,
add_all=True,
aspect=False,
share_all = True)
ax1 = grid[0]
ax2 = grid[1]
ax3 = grid[2]
ax4 = grid[3]
for galaxy in galaxies:
if galaxy.sersic < 1.:
col1 =ax1.scatter(galaxy.Mass, galaxy.ICD_IH * 100.,
s=25, c='0.8', edgecolor='0.8')
if 1. < galaxy.sersic < 2.:
col2 =ax2.scatter(galaxy.Mass, galaxy.ICD_IH * 100.,
s=25, c='0.8', edgecolor='0.8')
if 2. < galaxy.sersic < 3.:
col3 =ax3.scatter(galaxy.Mass, galaxy.ICD_IH * 100.,
s=25, c='0.8', edgecolor='0.8')
if 3. < galaxy.sersic:
if galaxy.ICD_IH*100 < 50:
col4 =ax4.scatter(galaxy.Mass, galaxy.ICD_IH * 100.,
s=25, c='0.8', edgecolor='0.8')
else:
col4 = ax4.scatter(galaxy.Mass, 50, marker=None, s=100,
verts=arrowup_verts)
# Add the box and whiskers
galaxies1 = filter(lambda galaxy: galaxy.ston_I > 30. and \
galaxy.sersic < 1, galaxies)
galaxies1 = pyl.asarray(galaxies1)
galaxies2 = filter(lambda galaxy: galaxy.ston_I > 30. and \
1 < galaxy.sersic < 2, galaxies)
galaxies2 = pyl.asarray(galaxies2)
galaxies3 = filter(lambda galaxy: galaxy.ston_I > 30. and \
2 < galaxy.sersic < 3, galaxies)
galaxies3 = pyl.asarray(galaxies3)
galaxies4 = filter(lambda galaxy: galaxy.ston_I > 30. and \
3 < galaxy.sersic, galaxies)
galaxies4 = pyl.asarray(galaxies4)
# convert to arrays so the elementwise bin comparisons below work
x1 = pyl.asarray([galaxy.Mass for galaxy in galaxies1])
x2 = pyl.asarray([galaxy.Mass for galaxy in galaxies2])
x3 = pyl.asarray([galaxy.Mass for galaxy in galaxies3])
x4 = pyl.asarray([galaxy.Mass for galaxy in galaxies4])
grid1 = []
grid2 = []
grid3 = []
grid4 = []
from boxplot_percentile_width import percentile_box_plot as pbp
bins_x =pyl.array([8.5, 9., 9.5, 10., 11])
for i in range(bins_x.size-1):
xmin = bins_x[i]
xmax = bins_x[i+1]
cond=[cond1 and cond2 for cond1, cond2 in zip(x1>=xmin, x1<xmax)]
grid1.append(galaxies1.compress(cond))
icd1 = []
for i in range(len(grid1)):
icd1.append([galaxy.ICD_IH*100 for galaxy in grid1[i]])
width = pyl.diff(bins_x)
index = pyl.delete(bins_x,-1) + 0.25
index[-1] = index[-1] + 0.25
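# box positions: each box sits at the left edge of its mass bin plus
# 0.25 dex, which centers it in the 0.5 dex bins; the wider final bin
# gets an extra 0.25 dex shift to stay centered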
bp1 = pbp(ax1, icd1, indexer=list(index), width=width)
bins_x =pyl.array([8.5, 9., 9.5, 10., 10.5 ,11.5])
for i in range(bins_x.size-1):
xmin = bins_x[i]
xmax = bins_x[i+1]
cond=[cond1 and cond2 for cond1, cond2 in zip(x2>=xmin, x2<xmax)]
grid2.append(galaxies2.compress(cond))
icd2 = []
for i in range(len(grid2)):
icd2.append([galaxy.ICD_IH*100 for galaxy in grid2[i]])
width = pyl.diff(bins_x)
index = pyl.delete(bins_x,-1) + 0.25
index[-1] = index[-1] + 0.25
bp2 = pbp(ax2, icd2, indexer=list(index), width=width)
bins_x =pyl.array([8.5, 9.5, 10., 11.])
for i in range(bins_x.size-1):
xmin = bins_x[i]
xmax = bins_x[i+1]
cond=[cond1 and cond2 for cond1, cond2 in zip(x3>=xmin, x3<xmax)]
grid3.append(galaxies3.compress(cond))
icd3 = []
for i in range(len(grid3)):
icd3.append([galaxy.ICD_IH*100 for galaxy in grid3[i]])
width = pyl.diff(bins_x)
index = pyl.delete(bins_x,-1) + 0.25
index[-1] = index[-1] + 0.25
index[0] = index[0] + 0.25
bp3 = pbp(ax3, icd3, indexer=list(index), width=width)
bins_x =pyl.array([8.5, 9., 9.5, 10., 10.5, 11., 12.])
for i in range(bins_x.size-1):
xmin = bins_x[i]
xmax = bins_x[i+1]
cond=[cond1 and cond2 for cond1, cond2 in zip(x4>=xmin, x4<xmax)]
grid4.append(galaxies4.compress(cond))
icd4 = []
for i in range(len(grid4)):
icd4.append([galaxy.ICD_IH*100 for galaxy in grid4[i]])
width = pyl.diff(bins_x)
index = pyl.delete(bins_x,-1) + 0.25
index[-1] = index[-1] + 0.25
bp4 = pbp(ax4, icd4, indexer=list(index), width=width)
ax1.set_xticks([8, 9, 10, 11])
ax2.set_xticks([8, 9, 10, 11])
ax3.set_xticks([8, 9, 10, 11])
ax4.set_xticks([8, 9, 10, 11])
ax1.set_ylim(0, 50)
ax2.set_ylim(0, 50)
ax3.set_ylim(0, 50)
ax4.set_ylim(0, 50)
ax1.set_ylabel(r'$\xi[i_{775},H_{160}]$ (%)')
ax1.set_title('n < 1')
ax2.set_title('1 < n < 2')
ax3.set_title('2 < n < 3')
ax4.set_title('3 < n')
pyl.figtext(.5, .05, r'Log Mass $(M_{\odot})$',fontsize=18,
horizontalalignment='center')
ax1.axhline(0, lw=2, zorder=0)
ax2.axhline(0, lw=2, zorder=0)
ax3.axhline(0, lw=2, zorder=0)
ax4.axhline(0, lw=2, zorder=0)
import matplotlib.font_manager
line1 = pyl.Line2D([], [], marker='o', mfc='0.8', mec='0.8', markersize=8,
linewidth=0)
line2 = pyl.Line2D([], [], marker='s', mec='#348ABD', mfc='None',
markersize=10, linewidth=0, markeredgewidth=2)
line3 = pyl.Line2D([], [], color='#A60628', linewidth=2)
prop = matplotlib.font_manager.FontProperties(size='small')
ax3.legend((line1, line2, line3), ('Data', 'Quartiles',
'Medians'), loc='upper center', prop=prop, ncol=1)
pyl.tight_layout()
pyl.subplots_adjust(bottom=0.21, left=0.11)
pyl.show()
if __name__ =='__main__':
plot_uvj_vs_icd()
| boada/ICD | sandbox/plot_sersic_vs_icd_vs_mass_box_width.py | Python | mit | 6,098 | ["Galaxy"] | 745bb7553a79e1dcc75e8b9bbef3ba35752a70848cd7b1c13496b5fc31236471 |
|
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar, Evan Feinberg, and Karl Leswing"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import logging
import os
import shutil
from warnings import warn
import time
import tempfile
import hashlib
import multiprocessing
from collections import Counter
from deepchem.utils.rdkit_util import load_molecule
from deepchem.utils.rdkit_util import MoleculeLoadException
import numpy as np
from scipy.spatial.distance import cdist
from copy import deepcopy
from deepchem.feat import ComplexFeaturizer
from deepchem.utils.save import log
"""
TODO(LESWING) add sanitization with rdkit upgrade to 2017.*
"""
def compute_centroid(coordinates):
"""Compute the x,y,z centroid of provided coordinates
coordinates: np.ndarray
Shape (N, 3), where N is number atoms.
"""
centroid = np.mean(coordinates, axis=0)
return (centroid)
def generate_random__unit_vector():
"""Generate a random unit vector on the 3-sphere.
citation:
http://mathworld.wolfram.com/SpherePointPicking.html
a. Choose random theta \element [0, 2*pi]
b. Choose random z \element [-1, 1]
c. Compute output vector u: (x,y,z) = (sqrt(1-z^2)*cos(theta), sqrt(1-z^2)*sin(theta),z)
"""
theta = np.random.uniform(low=0.0, high=2 * np.pi)
z = np.random.uniform(low=-1.0, high=1.0)
u = np.array(
[np.sqrt(1 - z**2) * np.cos(theta),
np.sqrt(1 - z**2) * np.sin(theta), z])
return (u)
def generate_random_rotation_matrix():
"""
1. Generate a random unit vector u, uniformly sampled from the unit
sphere (see function generate_random__unit_vector() for details)
2. Generate a second random unit vector v
a. If absolute value of u \dot v > 0.99, repeat.
(This is important for numerical stability. Intuition: we want them to
be as linearly independent as possible or else the orthogonalized
version of v will be much shorter in magnitude compared to u. I assume
in Stack they took this from Gram-Schmidt orthogonalization?)
b. v" = v - (u \dot v)*u, i.e. subtract out the component of v that's in
u's direction
c. normalize v" (this isn"t in Stack but I assume it must be done)
3. find w = u \cross v"
4. u, v", and w will form the columns of a rotation matrix, R. The
intuition is that u, v" and w are, respectively, what the standard basis
vectors e1, e2, and e3 will be mapped to under the transformation.
"""
u = generate_random__unit_vector()
v = generate_random__unit_vector()
while np.abs(np.dot(u, v)) >= 0.99:
v = generate_random__unit_vector()
vp = v - (np.dot(u, v) * u)
vp /= np.linalg.norm(vp)
w = np.cross(u, vp)
R = np.column_stack((u, vp, w))
return (R)
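# Sanity-check sketch (not part of the original module): a proper rotation
# matrix is orthonormal with determinant +1, so the following should hold:
#   R = generate_random_rotation_matrix()
#   assert np.allclose(np.dot(R.T, R), np.eye(3))
#   assert np.isclose(np.linalg.det(R), 1.0)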
def rotate_molecules(mol_coordinates_list):
"""Rotates provided molecular coordinates.
Pseudocode:
1. Generate random rotation matrix. This matrix applies a random
transformation to any 3-vector such that, were the random transformation
repeatedly applied, it would randomly sample along the surface of a sphere
with radius equal to the norm of the given 3-vector; cf.
generate_random_rotation_matrix() for details.
2. Apply R to all atomic coordinates.
3. Return rotated molecule
"""
R = generate_random_rotation_matrix()
rotated_coordinates_list = []
for mol_coordinates in mol_coordinates_list:
coordinates = deepcopy(mol_coordinates)
rotated_coordinates = np.transpose(np.dot(R, np.transpose(coordinates)))
rotated_coordinates_list.append(rotated_coordinates)
return (rotated_coordinates_list)
def compute_pairwise_distances(protein_xyz, ligand_xyz):
"""Takes an input m x 3 and n x 3 np arrays of 3D coords of protein and ligand,
respectively, and outputs an m x n np array of pairwise distances in Angstroms
between protein and ligand atoms. entry (i,j) is dist between the i"th protein
atom and the j"th ligand atom.
"""
pairwise_distances = cdist(protein_xyz, ligand_xyz, metric='euclidean')
return (pairwise_distances)
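# Illustrative shape contract (a sketch): for protein_xyz of shape (m, 3)
# and ligand_xyz of shape (n, 3), the result has shape (m, n) and
# result[i, j] == np.linalg.norm(protein_xyz[i] - ligand_xyz[j])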
"""following two functions adapted from:
http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
"""
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(vector_i, vector_j):
"""Returns the angle in radians between vectors "vector_i" and "vector_j"::
>>> print("%0.06f" % angle_between((1, 0, 0), (0, 1, 0)))
1.570796
>>> print("%0.06f" % angle_between((1, 0, 0), (1, 0, 0)))
0.000000
>>> print("%0.06f" % angle_between((1, 0, 0), (-1, 0, 0)))
3.141593
Note that this function always returns the smaller of the two angles between
the vectors (value between 0 and pi).
"""
vector_i_u = unit_vector(vector_i)
vector_j_u = unit_vector(vector_j)
angle = np.arccos(np.dot(vector_i_u, vector_j_u))
if np.isnan(angle):
if np.allclose(vector_i_u, vector_j_u):
return 0.0
else:
return np.pi
return angle
def hash_sybyl(sybyl, sybyl_types):
return (sybyl_types.index(sybyl))
def hash_ecfp(ecfp, power):
"""
Returns an int of size 2^power representing that
ECFP fragment. Input must be a string.
"""
ecfp = ecfp.encode('utf-8')
md5 = hashlib.md5()
md5.update(ecfp)
digest = md5.hexdigest()
ecfp_hash = int(digest, 16) % (2**power)
return (ecfp_hash)
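# Illustrative usage (a sketch; the fragment string is hypothetical):
#   hash_ecfp("6,c1ccccc1", power=11)  # deterministic int in [0, 2**11)
# The md5 digest makes the mapping stable across processes and runs.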
def hash_ecfp_pair(ecfp_pair, power):
"""Returns an int of size 2^power representing that ECFP pair. Input must be
a tuple of strings.
"""
ecfp = "%s,%s" % (ecfp_pair[0], ecfp_pair[1])
ecfp = ecfp.encode('utf-8')
md5 = hashlib.md5()
md5.update(ecfp)
digest = md5.hexdigest()
ecfp_hash = int(digest, 16) % (2**power)
return (ecfp_hash)
def compute_all_ecfp(mol, indices=None, degree=2):
"""Obtain molecular fragment for all atoms emanating outward to given degree.
For each fragment, compute SMILES string (for now) and hash to an int.
Return a dictionary mapping atom index to hashed SMILES.
"""
ecfp_dict = {}
from rdkit import Chem
for i in range(mol.GetNumAtoms()):
if indices is not None and i not in indices:
continue
env = Chem.FindAtomEnvironmentOfRadiusN(mol, degree, i, useHs=True)
submol = Chem.PathToSubmol(mol, env)
smile = Chem.MolToSmiles(submol)
ecfp_dict[i] = "%s,%s" % (mol.GetAtoms()[i].GetAtomicNum(), smile)
return ecfp_dict
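# The dict built above maps each selected atom index to a string of the
# form "<atomic number>,<SMILES of its radius-`degree` environment>";
# hash_ecfp / hash_ecfp_pair can then fold these strings into a
# fixed-size feature space.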
def compute_ecfp_features(mol, ecfp_degree=2, ecfp_power=11):
"""Computes ECFP features for provided rdkit molecule.
Parameters:
-----------
mol: rdkit molecule
Molecule to featurize.
ecfp_degree: int
ECFP radius
ecfp_power: int
Number of bits to store ECFP features (2^ecfp_power will be length of
ECFP array)
Returns:
--------
ecfp_array: np.ndarray
Returns an array of size 2^ecfp_power where array at index i has a 1 if
that ECFP fragment is found in the molecule and array at index j has a 0
if ECFP fragment not in molecule.
"""
from rdkit.Chem import AllChem
bv = AllChem.GetMorganFingerprintAsBitVect(
mol, ecfp_degree, nBits=2**ecfp_power)
return np.array(bv)
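# Illustrative usage (a sketch; assumes RDKit is importable):
#   from rdkit import Chem
#   mol = Chem.MolFromSmiles('CCO')
#   fp = compute_ecfp_features(mol, ecfp_degree=2, ecfp_power=11)
#   fp.shape  # -> (2048,)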
def featurize_binding_pocket_ecfp(protein_xyz,
protein,
ligand_xyz,
ligand,
pairwise_distances=None,
cutoff=4.5,
ecfp_degree=2):
"""Computes ECFP dicts for ligand and binding pocket of the protein.
Parameters
----------
protein_xyz: np.ndarray
Of shape (N_protein_atoms, 3)
protein: rdkit.rdchem.Mol
Contains more metadata.
ligand_xyz: np.ndarray
Of shape (N_ligand_atoms, 3)
ligand: rdkit.rdchem.Mol
Contains more metadata
pairwise_distances: np.ndarray
Array of pairwise protein-ligand distances (Angstroms)
cutoff: float
Cutoff distance for contact consideration
ecfp_degree: int
ECFP radius
"""
if pairwise_distances is None:
pairwise_distances = compute_pairwise_distances(protein_xyz, ligand_xyz)
contacts = np.nonzero((pairwise_distances < cutoff))
protein_atoms = set([int(c) for c in contacts[0].tolist()])
protein_ecfp_dict = compute_all_ecfp(
protein, indices=protein_atoms, degree=ecfp_degree)
ligand_ecfp_dict = compute_all_ecfp(ligand, degree=ecfp_degree)
return (protein_ecfp_dict, ligand_ecfp_dict)
def compute_all_sybyl(mol, indices=None):
"""Computes Sybyl atom types for atoms in molecule."""
raise NotImplementedError("This function is not implemented yet")
def featurize_binding_pocket_sybyl(protein_xyz,
protein,
ligand_xyz,
ligand,
pairwise_distances=None,
cutoff=7.0):
"""Computes Sybyl dicts for ligand and binding pocket of the protein.
Parameters
----------
protein_xyz: np.ndarray
Of shape (N_protein_atoms, 3)
protein: Rdkit Molecule
Contains more metadata.
ligand_xyz: np.ndarray
Of shape (N_ligand_atoms, 3)
ligand: Rdkit Molecule
Contains more metadata
pairwise_distances: np.ndarray
Array of pairwise protein-ligand distances (Angstroms)
cutoff: float
Cutoff distance for contact consideration.
"""
features_dict = {}
if pairwise_distances is None:
pairwise_distances = compute_pairwise_distances(protein_xyz, ligand_xyz)
contacts = np.nonzero((pairwise_distances < cutoff))
protein_atoms = set([int(c) for c in contacts[0].tolist()])
protein_sybyl_dict = compute_all_sybyl(protein, indices=protein_atoms)
ligand_sybyl_dict = compute_all_sybyl(ligand)
return (protein_sybyl_dict, ligand_sybyl_dict)
def compute_splif_features_in_range(protein,
ligand,
pairwise_distances,
contact_bin,
ecfp_degree=2):
"""Computes SPLIF features for protein atoms close to ligand atoms.
Finds all protein atoms that are > contact_bin[0] and < contact_bin[1] away
from ligand atoms. Then, finds the ECFP fingerprints for the contacting
atoms. Returns a dictionary mapping (protein_index_i, ligand_index_j) -->
(protein_ecfp_i, ligand_ecfp_j)
"""
contacts = np.nonzero((pairwise_distances > contact_bin[0]) &
(pairwise_distances < contact_bin[1]))
protein_atoms = set([int(c) for c in contacts[0].tolist()])
contacts = zip(contacts[0], contacts[1])
protein_ecfp_dict = compute_all_ecfp(
protein, indices=protein_atoms, degree=ecfp_degree)
ligand_ecfp_dict = compute_all_ecfp(ligand, degree=ecfp_degree)
splif_dict = {
contact: (protein_ecfp_dict[contact[0]], ligand_ecfp_dict[contact[1]])
for contact in contacts
}
return (splif_dict)
def featurize_splif(protein_xyz, protein, ligand_xyz, ligand, contact_bins,
pairwise_distances, ecfp_degree):
"""Computes SPLIF featurization of protein-ligand binding pocket.
For each contact range (i.e. 1 A to 2 A, 2 A to 3 A, etc.) compute a
dictionary mapping (protein_index_i, ligand_index_j) tuples -->
(protein_ecfp_i, ligand_ecfp_j) tuples. Return a list of such splif
dictionaries.
"""
splif_dicts = []
for i, contact_bin in enumerate(contact_bins):
splif_dicts.append(
compute_splif_features_in_range(protein, ligand, pairwise_distances,
contact_bin, ecfp_degree))
return (splif_dicts)
def compute_ring_center(mol, ring_indices):
"""Computes 3D coordinates of a center of a given ring.
Parameters:
-----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
ring_indices: array-like
Indices of atoms forming a ring
Returns:
--------
ring_centroid: np.ndarray
Position of a ring center
"""
conformer = mol.GetConformer()
ring_xyz = np.zeros((len(ring_indices), 3))
for i, atom_idx in enumerate(ring_indices):
atom_position = conformer.GetAtomPosition(atom_idx)
ring_xyz[i] = np.array(atom_position)
ring_centroid = compute_centroid(ring_xyz)
return ring_centroid
def compute_ring_normal(mol, ring_indices):
"""Computes normal to a plane determined by a given ring.
Parameters:
-----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
ring_indices: array-like
Indices of atoms forming a ring
Returns:
--------
normal: np.ndarray
Normal vector
"""
conformer = mol.GetConformer()
points = np.zeros((3, 3))
for i, atom_idx in enumerate(ring_indices[:3]):
atom_position = conformer.GetAtomPosition(atom_idx)
points[i] = np.array(atom_position)
v1 = points[1] - points[0]
v2 = points[2] - points[0]
normal = np.cross(v1, v2)
return normal
def is_pi_parallel(ring1_center,
ring1_normal,
ring2_center,
ring2_normal,
dist_cutoff=8.0,
angle_cutoff=30.0):
"""Check if two aromatic rings form a parallel pi-pi contact.
Parameters:
-----------
ring1_center, ring2_center: np.ndarray
Positions of centers of the two rings. Can be computed with the
compute_ring_center function.
ring1_normal, ring2_normal: np.ndarray
Normals of the two rings. Can be computed with the compute_ring_normal
function.
dist_cutoff: float
Distance cutoff. Max allowed distance between the ring center (Angstroms).
angle_cutoff: float
Angle cutoff. Max allowed deviation from the ideal (0deg) angle between
the rings (in degrees).
"""
dist = np.linalg.norm(ring1_center - ring2_center)
angle = angle_between(ring1_normal, ring2_normal) * 180 / np.pi
if ((angle < angle_cutoff or angle > 180.0 - angle_cutoff) and
dist < dist_cutoff):
return True
return False
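# Worked example (a sketch): with angle_cutoff=30.0, normals meeting at
# 20 deg or at 160 deg both count as parallel (ring normals have no
# preferred sign), provided the centers lie within dist_cutoff Angstroms.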
def is_pi_t(ring1_center,
ring1_normal,
ring2_center,
ring2_normal,
dist_cutoff=5.5,
angle_cutoff=30.0):
"""Check if two aromatic rings form a T-shaped pi-pi contact.
Parameters:
-----------
ring1_center, ring2_center: np.ndarray
Positions of centers of the two rings. Can be computed with the
compute_ring_center function.
ring1_normal, ring2_normal: np.ndarray
Normals of the two rings. Can be computed with the compute_ring_normal
function.
dist_cutoff: float
Distance cutoff. Max allowed distance between the ring center (Angstroms).
angle_cutoff: float
Angle cutoff. Max allowed deviation from the ideal (90deg) angle between
the rings (in degrees).
"""
dist = np.linalg.norm(ring1_center - ring2_center)
angle = angle_between(ring1_normal, ring2_normal) * 180 / np.pi
if ((90.0 - angle_cutoff < angle < 90.0 + angle_cutoff) and
dist < dist_cutoff):
return True
return False
def compute_pi_stack(protein,
ligand,
pairwise_distances=None,
dist_cutoff=4.4,
angle_cutoff=30.):
"""Find aromatic rings in protein and ligand that form pi-pi contacts.
For each atom in the contact, count the number of atoms in the other molecule
that form this contact.
Pseudocode:
for each aromatic ring in protein:
for each aromatic ring in ligand:
compute distance between centers
compute angle between normals
if it counts as parallel pi-pi:
count interacting atoms
if it counts as pi-T:
count interacting atoms
Parameters:
-----------
protein, ligand: rdkit.rdchem.Mol
Two interacting molecules.
pairwise_distances: np.ndarray (optional)
Array of pairwise protein-ligand distances (Angstroms)
dist_cutoff: float
Distance cutoff. Max allowed distance between the ring centers (in Angstroms).
angle_cutoff: float
Angle cutoff. Max allowed deviation from the ideal angle between rings.
Returns:
--------
protein_pi_t, protein_pi_parallel, ligand_pi_t, ligand_pi_parallel: dict
Dictionaries mapping atom indices to the number of atoms they interact with.
A separate dictionary is created for each type of pi stacking (parallel and
T-shaped) and each molecule (protein and ligand).
"""
protein_pi_parallel = Counter()
protein_pi_t = Counter()
ligand_pi_parallel = Counter()
ligand_pi_t = Counter()
protein_aromatic_rings = []
ligand_aromatic_rings = []
from rdkit import Chem
for mol, ring_list in ((protein, protein_aromatic_rings),
(ligand, ligand_aromatic_rings)):
aromatic_atoms = {atom.GetIdx() for atom in mol.GetAromaticAtoms()}
for ring in Chem.GetSymmSSSR(mol):
# if ring is aromatic
if set(ring).issubset(aromatic_atoms):
# save its indices, center, and normal
ring_center = compute_ring_center(mol, ring)
ring_normal = compute_ring_normal(mol, ring)
ring_list.append((ring, ring_center, ring_normal))
# remember protein-ligand pairs we already counted
counted_pairs_parallel = set()
counted_pairs_t = set()
for prot_ring, prot_ring_center, prot_ring_normal in protein_aromatic_rings:
for lig_ring, lig_ring_center, lig_ring_normal in ligand_aromatic_rings:
if is_pi_parallel(
prot_ring_center,
prot_ring_normal,
lig_ring_center,
lig_ring_normal,
angle_cutoff=angle_cutoff,
dist_cutoff=dist_cutoff):
prot_to_update = set()
lig_to_update = set()
for prot_atom_idx in prot_ring:
for lig_atom_idx in lig_ring:
if (prot_atom_idx, lig_atom_idx) not in counted_pairs_parallel:
# if this pair is new, count atoms forming a contact
prot_to_update.add(prot_atom_idx)
lig_to_update.add(lig_atom_idx)
counted_pairs_parallel.add((prot_atom_idx, lig_atom_idx))
protein_pi_parallel.update(prot_to_update)
ligand_pi_parallel.update(lig_to_update)
if is_pi_t(
prot_ring_center,
prot_ring_normal,
lig_ring_center,
lig_ring_normal,
angle_cutoff=angle_cutoff,
dist_cutoff=dist_cutoff):
prot_to_update = set()
lig_to_update = set()
for prot_atom_idx in prot_ring:
for lig_atom_idx in lig_ring:
if (prot_atom_idx, lig_atom_idx) not in counted_pairs_t:
# if this pair is new, count atoms forming a contact
prot_to_update.add(prot_atom_idx)
lig_to_update.add(lig_atom_idx)
counted_pairs_t.add((prot_atom_idx, lig_atom_idx))
protein_pi_t.update(prot_to_update)
ligand_pi_t.update(lig_to_update)
return (protein_pi_t, protein_pi_parallel, ligand_pi_t, ligand_pi_parallel)
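# Hedged usage sketch: two embedded benzene molecules stand in for the protein
# and ligand (real inputs come from load_molecule with sanitize=True):
#
# from rdkit import Chem
# from rdkit.Chem import AllChem
# m1 = Chem.MolFromSmiles('c1ccccc1')
# m2 = Chem.MolFromSmiles('c1ccccc1')
# AllChem.EmbedMolecule(m1, randomSeed=1)
# AllChem.EmbedMolecule(m2, randomSeed=2)
# prot_t, prot_par, lig_t, lig_par = compute_pi_stack(m1, m2)
# # each result is a Counter mapping atom index -> number of contacting atoms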
def is_cation_pi(cation_position,
ring_center,
ring_normal,
dist_cutoff=6.5,
angle_cutoff=30.0):
"""Check if a cation and an aromatic ring form contact.
Parameters:
-----------
ring_center: np.ndarray
Positions of ring center. Can be computed with the compute_ring_center
function.
ring_normal: np.ndarray
Normal of ring. Can be computed with the compute_ring_normal function.
dist_cutoff: float
Distance cutoff. Max allowed distance between ring center and cation
(in Angstroms).
angle_cutoff: float
Angle cutoff. Max allowed deviation from the ideal (0deg) angle between
ring normal and vector pointing from ring center to cation (in degrees).
"""
cation_to_ring_vec = cation_position - ring_center
dist = np.linalg.norm(cation_to_ring_vec)
angle = angle_between(cation_to_ring_vec, ring_normal) * 180. / np.pi
if ((angle < angle_cutoff or angle > 180.0 - angle_cutoff) and
(dist < dist_cutoff)):
return True
return False
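# Hedged sketch with synthetic coordinates: a cation 4 A above the ring
# center along the ring normal passes both default cutoffs:
#
# ring_center = np.zeros(3)
# ring_normal = np.array([0., 0., 1.])
# cation_position = np.array([0., 0., 4.0])
# is_cation_pi(cation_position, ring_center, ring_normal)  # True (dist 4, angle 0)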
def compute_cation_pi(mol1, mol2, charge_tolerance=0.01, **kwargs):
"""Finds aromatic rings in mo1 and cations in mol2 that interact with each
other.
Parameters:
-----------
mol1: rdkit.rdchem.Mol
Molecule to look for interacting rings
mol2: rdkit.rdchem.Mol
Molecule to look for interacting cations
charge_tolerance: float
An atom is considered a cation if its formal charge is greater than
1 - charge_tolerance
**kwargs:
Arguments that are passed to is_cation_pi function
Returns:
--------
mol1_pi: dict
Dictionary that maps atom indices (from mol1) to the number of cations
(in mol2) they interact with
mol2_cation: dict
Dictionary that maps atom indices (from mol2) to the number of aromatic
atoms (in mol1) they interact with
"""
mol1_pi = Counter()
mol2_cation = Counter()
conformer = mol2.GetConformer()
aromatic_atoms = set(atom.GetIdx() for atom in mol1.GetAromaticAtoms())
from rdkit import Chem
rings = [list(r) for r in Chem.GetSymmSSSR(mol1)]
for ring in rings:
# if ring from mol1 is aromatic
if set(ring).issubset(aromatic_atoms):
ring_center = compute_ring_center(mol1, ring)
ring_normal = compute_ring_normal(mol1, ring)
for atom in mol2.GetAtoms():
# ...and atom from mol2 is a cation
if atom.GetFormalCharge() > 1.0 - charge_tolerance:
cation_position = np.array(conformer.GetAtomPosition(atom.GetIdx()))
# if angle and distance are correct
if is_cation_pi(cation_position, ring_center, ring_normal, **kwargs):
# count atoms forming a contact
mol1_pi.update(ring)
mol2_cation.update([atom.GetIdx()])  # RDKit atoms expose GetIdx(), not GetIndex()
return mol1_pi, mol2_cation
def compute_binding_pocket_cation_pi(protein, ligand, **kwargs):
"""Finds cation-pi interactions between protein and ligand.
Parameters:
-----------
protein, ligand: rdkit.rdchem.Mol
Interacting molecules
**kwargs:
Arguments that are passed to compute_cation_pi function
Returns:
--------
protein_cation_pi, ligand_cation_pi: dict
Dictionaries that map atom indices to the number of cations/aromatic
atoms they interact with
"""
# find interacting rings from protein and cations from ligand
protein_pi, ligand_cation = compute_cation_pi(protein, ligand, **kwargs)
# find interacting cations from protein and rings from ligand
ligand_pi, protein_cation = compute_cation_pi(ligand, protein, **kwargs)
# merge counters
protein_cation_pi = Counter()
protein_cation_pi.update(protein_pi)
protein_cation_pi.update(protein_cation)
ligand_cation_pi = Counter()
ligand_cation_pi.update(ligand_pi)
ligand_cation_pi.update(ligand_cation)
return protein_cation_pi, ligand_cation_pi
def get_partial_charge(atom):
"""Get partial charge of a given atom (rdkit Atom object)"""
try:
value = atom.GetProp(str("_GasteigerCharge"))
if value == '-nan':
return 0
return float(value)
except KeyError:
return 0
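# Gasteiger charges must be computed before this property is readable; in the
# pipeline rdkit_util.compute_charges does this. A hedged standalone sketch:
#
# from rdkit import Chem
# from rdkit.Chem import AllChem
# mol = Chem.MolFromSmiles('CC(=O)O')  # acetic acid, illustrative
# AllChem.ComputeGasteigerCharges(mol)
# get_partial_charge(mol.GetAtomWithIdx(0))  # returns a float
# get_partial_charge(Chem.MolFromSmiles('C').GetAtomWithIdx(0))  # 0, property absent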
def get_formal_charge(atom):
warn(
'get_formal_charge function is deprecated and will be removed'
' in version 1.4, use get_partial_charge instead', DeprecationWarning)
return get_partial_charge(atom)
def is_salt_bridge(atom_i, atom_j):
"""Check if two atoms have correct charges to form a salt bridge"""
if np.abs(2.0 - np.abs(
get_partial_charge(atom_i) - get_partial_charge(atom_j))) < 0.01:
return True
return False
def compute_salt_bridges(protein_xyz,
protein,
ligand_xyz,
ligand,
pairwise_distances,
cutoff=5.0):
"""Find salt bridge contacts between protein and lingand.
Parameters:
-----------
protein_xyz, ligand_xyz: np.ndarray
Arrays with atomic coordinates
protein, ligand: rdkit.rdchem.Mol
Interacting molecules
pairwise_distances: np.ndarray
Array of pairwise protein-ligand distances (Angstroms)
cutoff: float
Cutoff distance for contact consideration
Returns:
--------
salt_bridge_contacts: list of tuples
List of contacts. Tuple (i, j) indicates that atom i from protein
interacts with atom j from ligand.
"""
salt_bridge_contacts = []
contacts = np.nonzero(pairwise_distances < cutoff)
contacts = zip(contacts[0], contacts[1])
for contact in contacts:
protein_atom = protein.GetAtoms()[int(contact[0])]
ligand_atom = ligand.GetAtoms()[int(contact[1])]
if is_salt_bridge(protein_atom, ligand_atom):
salt_bridge_contacts.append(contact)
return salt_bridge_contacts
def is_angle_within_cutoff(vector_i, vector_j, hbond_angle_cutoff):
angle = angle_between(vector_i, vector_j) * 180. / np.pi
return (angle > (180 - hbond_angle_cutoff) and
angle < (180. + hbond_angle_cutoff))
def is_hydrogen_bond(protein_xyz, protein, ligand_xyz, ligand, contact,
hbond_angle_cutoff):
"""
Determine if a pair of atoms (contact = tuple of protein_atom_index, ligand_atom_index)
between protein and ligand represents a hydrogen bond. Returns a boolean result.
"""
# TODO(LESWING)
return False
def compute_hbonds_in_range(protein, protein_xyz, ligand, ligand_xyz,
pairwise_distances, hbond_dist_bin,
hbond_angle_cutoff):
"""
Find all pairs of (protein_index_i, ligand_index_j) that hydrogen bond given
a distance bin and an angle cutoff.
"""
contacts = np.nonzero((pairwise_distances > hbond_dist_bin[0]) &
(pairwise_distances < hbond_dist_bin[1]))
contacts = zip(contacts[0], contacts[1])
hydrogen_bond_contacts = []
for contact in contacts:
if is_hydrogen_bond(protein_xyz, protein, ligand_xyz, ligand, contact,
hbond_angle_cutoff):
hydrogen_bond_contacts.append(contact)
return hydrogen_bond_contacts
def compute_hydrogen_bonds(protein_xyz, protein, ligand_xyz, ligand,
pairwise_distances, hbond_dist_bins,
hbond_angle_cutoffs):
"""Computes hydrogen bonds between proteins and ligands.
Returns a list of sublists. Each sublist is a series of tuples of
(protein_index_i, ligand_index_j) that represent a hydrogen bond. Each sublist
represents a different type of hydrogen bond.
"""
hbond_contacts = []
for i, hbond_dist_bin in enumerate(hbond_dist_bins):
hbond_angle_cutoff = hbond_angle_cutoffs[i]
hbond_contacts.append(
compute_hbonds_in_range(protein, protein_xyz, ligand, ligand_xyz,
pairwise_distances, hbond_dist_bin,
hbond_angle_cutoff))
return hbond_contacts
def convert_atom_to_voxel(molecule_xyz,
atom_index,
box_width,
voxel_width,
verbose=False):
"""Converts atom coordinates to an i,j,k grid index.
Parameters:
-----------
molecule_xyz: np.ndarray
Array with coordinates of all atoms in the molecule, shape (N, 3)
atom_index: int
Index of an atom
box_width: float
Size of a box
voxel_width: float
Size of a voxel
verbose: bool
Print warnings when atom is outside of a box
"""
indices = np.floor(
(molecule_xyz[atom_index] + box_width / 2.0) / voxel_width).astype(int)
if ((indices < 0) | (indices >= box_width / voxel_width)).any():
if verbose:
warn('Coordinates are outside of the box (atom id = %s,'
' coords xyz = %s, coords in box = %s)' %
(atom_index, molecule_xyz[atom_index], indices))
return [indices]
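# Worked example of the index arithmetic, as a hedged sketch: with a 16 A box
# and 1 A voxels, coordinates in [-8, 8) map to indices 0..15.
#
# xyz = np.array([[0., 0., 0.], [-7.9, 7.9, 0.]])
# convert_atom_to_voxel(xyz, 0, 16.0, 1.0)  # [array([8, 8, 8])]
# convert_atom_to_voxel(xyz, 1, 16.0, 1.0)  # [array([ 0, 15,  8])]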
def convert_atom_pair_to_voxel(molecule_xyz_tuple, atom_index_pair, box_width,
voxel_width):
"""Converts a pair of atoms to a list of i,j,k tuples."""
indices_list = []
indices_list.append(
convert_atom_to_voxel(molecule_xyz_tuple[0], atom_index_pair[0],
box_width, voxel_width)[0])
indices_list.append(
convert_atom_to_voxel(molecule_xyz_tuple[1], atom_index_pair[1],
box_width, voxel_width)[0])
return indices_list
def compute_charge_dictionary(molecule):
"""Create a dictionary with partial charges for each atom in the molecule.
This function assumes that the charges for the molecule are already
computed (it can be done with rdkit_util.compute_charges(molecule))
"""
charge_dictionary = {}
for i, atom in enumerate(molecule.GetAtoms()):
charge_dictionary[i] = get_partial_charge(atom)
return charge_dictionary
def subtract_centroid(xyz, centroid):
"""Subtracts centroid from each coordinate.
Subtracts the centroid, a numpy array of dim 3, from all coordinates of all
atoms in the molecule
"""
xyz -= np.transpose(centroid)
return xyz
class RdkitGridFeaturizer(ComplexFeaturizer):
"""Featurizes protein-ligand complex using flat features or a 3D grid (in which
each voxel is described with a vector of features).
"""
def __init__(self,
nb_rotations=0,
feature_types=None,
ecfp_degree=2,
ecfp_power=3,
splif_power=3,
box_width=16.0,
voxel_width=1.0,
flatten=False,
verbose=True,
sanitize=False,
**kwargs):
"""Parameters:
-----------
nb_rotations: int, optional (default 0)
Number of additional random rotations of a complex to generate.
feature_types: list, optional (default ['ecfp'])
Types of features to calculate. Available types are:
flat features: 'ecfp_ligand', 'ecfp_hashed', 'splif_hashed', 'hbond_count'
voxel features: 'ecfp', 'splif', 'sybyl', 'salt_bridge', 'charge', 'hbond',
'pi_stack', 'cation_pi'
There are also 3 predefined sets of features: 'flat_combined',
'voxel_combined', and 'all_combined'. Calculated features are concatenated
and their order is preserved (features in predefined sets are in
alphabetical order).
ecfp_degree: int, optional (default 2)
ECFP radius.
ecfp_power: int, optional (default 3)
Number of bits to store ECFP features (resulting vector will be
2^ecfp_power long)
splif_power: int, optional (default 3)
Number of bits to store SPLIF features (resulting vector will be
2^splif_power long)
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box is centered on a
ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid.
flatten: bool, optional (default False)
Indicate whether calculated features should be flattened. Output is always
flattened if flat features are specified in feature_types.
verbose: bool, optional (default True)
Verbosity for logging
sanitize: bool, optional (default False)
If set to True molecules will be sanitized. Note that calculating some
features (e.g. aromatic interactions) requires sanitized molecules.
**kwargs: dict, optional
Keyword arguments can be used to specify custom cutoffs and bins (see
default values below).
Default cutoffs and bins:
-------------------------
hbond_dist_bins: [(2.2, 2.5), (2.5, 3.2), (3.2, 4.0)]
hbond_angle_cutoffs: [5, 50, 90]
splif_contact_bins: [(0, 2.0), (2.0, 3.0), (3.0, 4.5)]
ecfp_cutoff: 4.5
sybyl_cutoff: 7.0
salt_bridges_cutoff: 5.0
pi_stack_dist_cutoff: 4.4
pi_stack_angle_cutoff: 30.0
cation_pi_dist_cutoff: 6.5
cation_pi_angle_cutoff: 30.0
"""
# check if user tries to set removed arguments
deprecated_args = [
'box_x', 'box_y', 'box_z', 'save_intermediates', 'voxelize_features',
'parallel', 'voxel_feature_types'
]
# list of features that require sanitized molecules
require_sanitized = ['pi_stack', 'cation_pi', 'ecfp_ligand']
# not implemented featurization types
not_implemented = ['sybyl']
for arg in deprecated_args:
if arg in kwargs and verbose:
warn(
'%s argument was removed and it is ignored,'
' using it will result in error in version 1.4' % arg,
DeprecationWarning)
self.verbose = verbose
self.sanitize = sanitize
self.flatten = flatten
self.ecfp_degree = ecfp_degree
self.ecfp_power = ecfp_power
self.splif_power = splif_power
self.nb_rotations = nb_rotations
# default values
self.cutoffs = {
'hbond_dist_bins': [(2.2, 2.5), (2.5, 3.2), (3.2, 4.0)],
'hbond_angle_cutoffs': [5, 50, 90],
'splif_contact_bins': [(0, 2.0), (2.0, 3.0), (3.0, 4.5)],
'ecfp_cutoff': 4.5,
'sybyl_cutoff': 7.0,
'salt_bridges_cutoff': 5.0,
'pi_stack_dist_cutoff': 4.4,
'pi_stack_angle_cutoff': 30.0,
'cation_pi_dist_cutoff': 6.5,
'cation_pi_angle_cutoff': 30.0,
}
# update with cutoffs specified by the user
for arg, value in kwargs.items():
if arg in self.cutoffs:
self.cutoffs[arg] = value
self.box_width = float(box_width)
self.voxel_width = float(voxel_width)
self.voxels_per_edge = int(self.box_width / self.voxel_width)
self.sybyl_types = [
"C3", "C2", "C1", "Cac", "Car", "N3", "N3+", "Npl", "N2", "N1", "Ng+",
"Nox", "Nar", "Ntr", "Nam", "Npl3", "N4", "O3", "O-", "O2", "O.co2",
"O.spc", "O.t3p", "S3", "S3+", "S2", "So2", "Sox"
"Sac"
"SO", "P3", "P", "P3+", "F", "Cl", "Br", "I"
]
self.FLAT_FEATURES = [
'ecfp_ligand', 'ecfp_hashed', 'splif_hashed', 'hbond_count'
]
self.VOXEL_FEATURES = [
'ecfp', 'splif', 'sybyl', 'salt_bridge', 'charge', 'hbond', 'pi_stack',
'cation_pi'
]
if feature_types is None:
feature_types = ['ecfp']
# each entry is a tuple (is_flat, feature_name)
self.feature_types = []
# list of features that cannot be calculated with specified parameters
# this list is used to define <flat/voxel/all>_combined subset
ignored_features = []
if self.sanitize is False:
ignored_features += require_sanitized
ignored_features += not_implemented
# parse provided feature types
for feature_type in feature_types:
if self.sanitize is False and feature_type in require_sanitized:
if self.verbose:
warn('sanitize is set to False, %s feature will be ignored' %
feature_type)
continue
if feature_type in not_implemented:
if self.verbose:
warn('%s feature is not implemented yet and will be ignored' %
feature_type)
continue
if feature_type in self.FLAT_FEATURES:
self.feature_types.append((True, feature_type))
if self.flatten is False:
if self.verbose:
warn('%s feature is used, output will be flattened' % feature_type)
self.flatten = True
elif feature_type in self.VOXEL_FEATURES:
self.feature_types.append((False, feature_type))
elif feature_type == 'flat_combined':
self.feature_types += [(True, ftype)
for ftype in sorted(self.FLAT_FEATURES)
if ftype not in ignored_features]
if self.flatten is False:
if self.verbose:
warn('Flat features are used, output will be flattened')
self.flatten = True
elif feature_type == 'voxel_combined':
self.feature_types += [(False, ftype)
for ftype in sorted(self.VOXEL_FEATURES)
if ftype not in ignored_features]
elif feature_type == 'all_combined':
self.feature_types += [(True, ftype)
for ftype in sorted(self.FLAT_FEATURES)
if ftype not in ignored_features]
self.feature_types += [(False, ftype)
for ftype in sorted(self.VOXEL_FEATURES)
if ftype not in ignored_features]
if self.flatten is False:
if self.verbose:
warn('Flat features are used, output will be flattened')
self.flatten = True
elif self.verbose:
warn('Ignoring unknown feature %s' % feature_type)
def _compute_feature(self, feature_name, prot_xyz, prot_rdk, lig_xyz, lig_rdk,
distances):
if feature_name == 'ecfp_ligand':
return [compute_ecfp_features(lig_rdk, self.ecfp_degree, self.ecfp_power)]
if feature_name == 'ecfp_hashed':
return [
self._vectorize(
hash_ecfp, feature_dict=ecfp_dict, channel_power=self.ecfp_power)
for ecfp_dict in featurize_binding_pocket_ecfp(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
distances,
cutoff=self.cutoffs['ecfp_cutoff'],
ecfp_degree=self.ecfp_degree)
]
if feature_name == 'splif_hashed':
return [
self._vectorize(
hash_ecfp_pair,
feature_dict=splif_dict,
channel_power=self.splif_power) for splif_dict in featurize_splif(
prot_xyz, prot_rdk, lig_xyz, lig_rdk, self.cutoffs[
'splif_contact_bins'], distances, self.ecfp_degree)
]
if feature_name == 'hbond_count':
return [
self._vectorize(
hash_ecfp_pair, feature_list=hbond_list, channel_power=0)
for hbond_list in compute_hydrogen_bonds(
prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances, self.cutoffs[
'hbond_dist_bins'], self.cutoffs['hbond_angle_cutoffs'])
]
if feature_name == 'ecfp':
return [
sum([
self._voxelize(
convert_atom_to_voxel,
hash_ecfp,
xyz,
feature_dict=ecfp_dict,
channel_power=self.ecfp_power)
for xyz, ecfp_dict in zip((prot_xyz, lig_xyz),
featurize_binding_pocket_ecfp(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
distances,
cutoff=self.cutoffs['ecfp_cutoff'],
ecfp_degree=self.ecfp_degree))
])
]
if feature_name == 'splif':
return [
self._voxelize(
convert_atom_pair_to_voxel,
hash_ecfp_pair, (prot_xyz, lig_xyz),
feature_dict=splif_dict,
channel_power=self.splif_power) for splif_dict in featurize_splif(
prot_xyz, prot_rdk, lig_xyz, lig_rdk, self.cutoffs[
'splif_contact_bins'], distances, self.ecfp_degree)
]
if feature_name == 'sybyl':
return [
self._voxelize(
convert_atom_to_voxel,
lambda x: hash_sybyl(x, sybyl_types=self.sybyl_types),
xyz,
feature_dict=sybyl_dict,
nb_channel=len(self.sybyl_types))
for xyz, sybyl_dict in zip((prot_xyz, lig_xyz),
featurize_binding_pocket_sybyl(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
distances,
cutoff=self.cutoffs['sybyl_cutoff']))
]
if feature_name == 'salt_bridge':
return [
self._voxelize(
convert_atom_pair_to_voxel,
None, (prot_xyz, lig_xyz),
feature_list=compute_salt_bridges(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
distances,
cutoff=self.cutoffs['salt_bridges_cutoff']),
nb_channel=1)
]
if feature_name == 'charge':
return [
sum([
self._voxelize(
convert_atom_to_voxel,
None,
xyz,
feature_dict=compute_charge_dictionary(mol),
nb_channel=1,
dtype="np.float16")
for xyz, mol in ((prot_xyz, prot_rdk), (lig_xyz, lig_rdk))
])
]
if feature_name == 'hbond':
return [
self._voxelize(
convert_atom_pair_to_voxel,
None, (prot_xyz, lig_xyz),
feature_list=hbond_list,
channel_power=0) for hbond_list in compute_hydrogen_bonds(
prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances, self.cutoffs[
'hbond_dist_bins'], self.cutoffs['hbond_angle_cutoffs'])
]
if feature_name == 'pi_stack':
return self._voxelize_pi_stack(prot_xyz, prot_rdk, lig_xyz, lig_rdk,
distances)
if feature_name == 'cation_pi':
return [
sum([
self._voxelize(
convert_atom_to_voxel,
None,
xyz,
feature_dict=cation_pi_dict,
nb_channel=1) for xyz, cation_pi_dict in zip(
(prot_xyz, lig_xyz),
compute_binding_pocket_cation_pi(
prot_rdk,
lig_rdk,
dist_cutoff=self.cutoffs['cation_pi_dist_cutoff'],
angle_cutoff=self.cutoffs['cation_pi_angle_cutoff'],
))
])
]
raise ValueError('Unknown feature type "%s"' % feature_name)
def _featurize_complex(self, mol_pdb_file, protein_pdb_file):
"""Computes grid featurization of protein/ligand complex.
Takes as input the PDB filenames of the ligand and the protein.
This function computes the centroid of the ligand, subtracts this
centroid from the atomic coordinates of both protein and ligand atoms,
and then featurizes the translated protein-ligand system with the
scheme specified by the user.
Parameters
----------
mol_pdb_file: str
Filename for ligand pdb file.
protein_pdb_file: str
Filename for protein pdb file.
"""
try:
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
protein_xyz, protein_rdk = load_molecule(
protein_pdb_file, calc_charges=True, sanitize=self.sanitize)
############################################################## TIMING
time2 = time.time()
log("TIMING: Loading protein coordinates took %0.3f s" % (time2 - time1),
self.verbose)
############################################################## TIMING
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
ligand_xyz, ligand_rdk = load_molecule(
mol_pdb_file, calc_charges=True, sanitize=self.sanitize)
############################################################## TIMING
time2 = time.time()
log("TIMING: Loading ligand coordinates took %0.3f s" % (time2 - time1),
self.verbose)
############################################################## TIMING
except MoleculeLoadException:
logging.warning("Some molecules cannot be loaded by Rdkit. Skipping")
return None
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
centroid = compute_centroid(ligand_xyz)
ligand_xyz = subtract_centroid(ligand_xyz, centroid)
protein_xyz = subtract_centroid(protein_xyz, centroid)
############################################################## TIMING
time2 = time.time()
log("TIMING: Centroid processing took %0.3f s" % (time2 - time1),
self.verbose)
############################################################## TIMING
pairwise_distances = compute_pairwise_distances(protein_xyz, ligand_xyz)
transformed_systems = {}
transformed_systems[(0, 0)] = [protein_xyz, ligand_xyz]
for i in range(self.nb_rotations):
rotated_system = rotate_molecules([protein_xyz, ligand_xyz])
transformed_systems[(i + 1, 0)] = rotated_system
features_dict = {}
for system_id, (protein_xyz, ligand_xyz) in transformed_systems.items():
feature_arrays = []
for is_flat, function_name in self.feature_types:
result = self._compute_feature(
function_name,
protein_xyz,
protein_rdk,
ligand_xyz,
ligand_rdk,
pairwise_distances,
)
feature_arrays += result
if self.flatten:
features_dict[system_id] = np.concatenate(
[feature_array.flatten() for feature_array in feature_arrays])
else:
features_dict[system_id] = np.concatenate(feature_arrays, axis=-1)
# TODO(rbharath): Is this squeeze OK?
features = np.squeeze(np.array(list(features_dict.values())))
return features
def _voxelize(self,
get_voxels,
hash_function,
coordinates,
feature_dict=None,
feature_list=None,
channel_power=None,
nb_channel=16,
dtype="np.int8"):
"""Private helper function to voxelize inputs.
Parameters
----------
get_voxels: function
Function that voxelizes inputs
hash_function: function
Used to map feature choices to voxel channels.
coordinates: np.ndarray
Contains the 3D coordinates of a molecular system.
feature_dict: Dictionary
Keys are atom indices.
feature_list: list
List of available features.
channel_power: int
If specified, nb_channel is set to 2**channel_power.
TODO: This feels like a redundant parameter.
nb_channel: int
The number of feature channels computed per voxel
dtype: type
The dtype of the numpy ndarray created to hold features.
"""
if channel_power is not None:
if channel_power == 0:
nb_channel = 1
else:
nb_channel = int(2**channel_power)
if dtype == "np.int8":
feature_tensor = np.zeros(
(self.voxels_per_edge, self.voxels_per_edge, self.voxels_per_edge,
nb_channel),
dtype=np.int8)
else:
feature_tensor = np.zeros(
(self.voxels_per_edge, self.voxels_per_edge, self.voxels_per_edge,
nb_channel),
dtype=np.float16)
if feature_dict is not None:
for key, features in feature_dict.items():
voxels = get_voxels(coordinates, key, self.box_width, self.voxel_width)
for voxel in voxels:
if ((voxel >= 0) & (voxel < self.voxels_per_edge)).all():
if hash_function is not None:
feature_tensor[voxel[0], voxel[1], voxel[2],
hash_function(features, channel_power)] += 1.0
else:
feature_tensor[voxel[0], voxel[1], voxel[2], 0] += features
elif feature_list is not None:
for key in feature_list:
voxels = get_voxels(coordinates, key, self.box_width, self.voxel_width)
for voxel in voxels:
if ((voxel >= 0) & (voxel < self.voxels_per_edge)).all():
feature_tensor[voxel[0], voxel[1], voxel[2], 0] += 1.0
return feature_tensor
def _voxelize_pi_stack(self, prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances):
protein_pi_t, protein_pi_parallel, ligand_pi_t, ligand_pi_parallel = (
compute_pi_stack(
prot_rdk,
lig_rdk,
distances,
dist_cutoff=self.cutoffs['pi_stack_dist_cutoff'],
angle_cutoff=self.cutoffs['pi_stack_angle_cutoff']))
pi_parallel_tensor = self._voxelize(
convert_atom_to_voxel,
None,
prot_xyz,
feature_dict=protein_pi_parallel,
nb_channel=1)
pi_parallel_tensor += self._voxelize(
convert_atom_to_voxel,
None,
lig_xyz,
feature_dict=ligand_pi_parallel,
nb_channel=1)
pi_t_tensor = self._voxelize(
convert_atom_to_voxel,
None,
prot_xyz,
feature_dict=protein_pi_t,
nb_channel=1)
pi_t_tensor += self._voxelize(
convert_atom_to_voxel,
None,
lig_xyz,
feature_dict=ligand_pi_t,
nb_channel=1)
return [pi_parallel_tensor, pi_t_tensor]
def _vectorize(self,
hash_function,
feature_dict=None,
feature_list=None,
channel_power=10):
feature_vector = np.zeros(2**channel_power)
if feature_dict is not None:
on_channels = [
hash_function(feature, channel_power)
for key, feature in feature_dict.items()
]
feature_vector[on_channels] += 1
elif feature_list is not None:
feature_vector[0] += len(feature_list)
return feature_vector
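# Hedged end-to-end sketch (the PDB paths are placeholders, and
# _featurize_complex is called directly only for illustration):
#
# featurizer = RdkitGridFeaturizer(
#     feature_types=['ecfp', 'splif', 'salt_bridge'],
#     ecfp_power=9, splif_power=9,
#     box_width=16.0, voxel_width=1.0, flatten=True)
# features = featurizer._featurize_complex('ligand.pdb', 'protein.pdb')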
| ktaneishi/deepchem | deepchem/feat/rdkit_grid_featurizer.py | Python | mit | 49,998 | ["RDKit"] | 3b6f79b72a8f8d06f639f20eb03d4cb63aa48b72141d577e1d154677c6ad0e14 |
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
from __future__ import absolute_import, division, print_function
# Author: Jacob Kristhammar, 2010
import base64
import collections
import hashlib
import os
import struct
import tornado.escape
import tornado.web
import zlib
from tornado.concurrent import Future
from tornado.escape import utf8, native_str, to_unicode
from tornado import gen, httpclient, httputil
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.iostream import StreamClosedError
from tornado.log import gen_log, app_log
from tornado import simple_httpclient
from tornado.tcpclient import TCPClient
from tornado.util import _websocket_mask, PY3
if PY3:
from urllib.parse import urlparse # py3
xrange = range
else:
from urlparse import urlparse # py2
class WebSocketError(Exception):
pass
class WebSocketClosedError(WebSocketError):
"""Raised by operations on a closed connection.
.. versionadded:: 3.2
"""
pass
class WebSocketHandler(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
Custom upgrade response headers can be sent by overriding
`~tornado.web.RequestHandler.set_default_headers` or
`~tornado.web.RequestHandler.prepare`.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
Here is an example WebSocket handler that echoes all received messages
back to the client:
.. testcode::
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened")
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
.. testoutput::
:hide:
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
`close()`. Likewise, your request handler class should implement the
`open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from javascript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
If the application setting ``websocket_ping_interval`` has a non-zero
value, a ping will be sent periodically, and the connection will be
closed if a response is not received before the ``websocket_ping_timeout``.
Messages larger than the ``websocket_max_message_size`` application setting
(default 10MiB) will not be accepted.
.. versionchanged:: 4.5
Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and
``websocket_max_message_size``.
"""
def __init__(self, application, request, **kwargs):
super(WebSocketHandler, self).__init__(application, request, **kwargs)
self.ws_connection = None
self.close_code = None
self.close_reason = None
self.stream = None
self._on_close_called = False
@tornado.web.asynchronous
def get(self, *args, **kwargs):
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != 'websocket':
self.set_status(400)
log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Connection header should be upgrade.
# Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(lambda s: s.strip().lower(),
headers.get("Connection", "").split(","))
if 'upgrade' not in connection:
self.set_status(400)
log_msg = "\"Connection\" must be \"Upgrade\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.set_status(403)
log_msg = "Cross origin websockets not allowed"
self.finish(log_msg)
gen_log.debug(log_msg)
return
self.ws_connection = self.get_websocket_protocol()
if self.ws_connection:
self.ws_connection.accept_connection()
else:
self.set_status(426, "Upgrade Required")
self.set_header("Sec-WebSocket-Version", "7, 8, 13")
self.finish()
stream = None
@property
def ping_interval(self):
"""The interval for websocket keep-alive pings.
Set websocket_ping_interval = 0 to disable pings.
"""
return self.settings.get('websocket_ping_interval', None)
@property
def ping_timeout(self):
"""If no ping is received in this many seconds,
close the websocket connection (VPNs, etc. can fail to cleanly close ws connections).
Default is max of 3 pings or 30 seconds.
"""
return self.settings.get('websocket_ping_timeout', None)
@property
def max_message_size(self):
"""Maximum allowed message size.
If the remote peer sends a message larger than this, the connection
will be closed.
Default is 10MiB.
"""
return self.settings.get('websocket_max_message_size', None)
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
.. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control.
"""
if self.ws_connection is None:
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
return self.ws_connection.write_message(message, binary=binary)
def select_subprotocol(self, subprotocols):
"""Invoked when a new WebSocket requests specific subprotocols.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol. Failure to select a
subprotocol does not automatically abort the connection,
although clients may close the connection if none of their
proposed subprotocols was selected.
"""
return None
def get_compression_options(self):
"""Override to return compression options for the connection.
If this method returns None (the default), compression will
be disabled. If it returns a dict (even an empty one), it
will be enabled. The contents of the dict may be used to
control the following compression options:
``compression_level`` specifies the compression level.
``mem_level`` specifies the amount of memory used for the internal compression state.
These parameters are documented in details here:
https://docs.python.org/3.6/library/zlib.html#zlib.compressobj
.. versionadded:: 4.1
.. versionchanged:: 4.5
Added ``compression_level`` and ``mem_level``.
"""
# TODO: Add wbits option.
return None
def open(self, *args, **kwargs):
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
"""
pass
def on_message(self, message):
"""Handle incoming messages on the WebSocket
This method must be overridden.
.. versionchanged:: 4.5
``on_message`` can be a coroutine.
"""
raise NotImplementedError
def ping(self, data):
"""Send ping frame to the remote end."""
if self.ws_connection is None:
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
def on_pong(self, data):
"""Invoked when the response to a ping frame is received."""
pass
def on_ping(self, data):
"""Invoked when the a ping frame is received."""
pass
def on_close(self):
"""Invoked when the WebSocket is closed.
If the connection was closed cleanly and a status code or reason
phrase was supplied, these values will be available as the attributes
``self.close_code`` and ``self.close_reason``.
.. versionchanged:: 4.0
Added ``close_code`` and ``close_reason`` attributes.
"""
pass
def close(self, code=None, reason=None):
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
``code`` may be a numeric status code, taken from the values
defined in `RFC 6455 section 7.4.1
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
``reason`` may be a textual message about why the connection is
closing. These values are made available to the client, but are
not otherwise interpreted by the websocket protocol.
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.ws_connection:
self.ws_connection.close(code, reason)
self.ws_connection = None
def check_origin(self, origin):
"""Override to enable support for allowing alternate origins.
The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request. This
method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that
implement WebSockets support this header, and non-browser
clients do not have the same cross-site security concerns).
Should return True to accept the request or False to reject it.
By default, rejects all requests with an origin on a host other
than this one.
This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.
.. warning::
This is an important security measure; don't disable it
without understanding the security implications. In
particular, if your authentication is cookie-based, you
must either restrict the origins allowed by
``check_origin()`` or implement your own XSRF-like
protection for websocket connections. See `these
<https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
`articles
<https://devcenter.heroku.com/articles/websocket-security>`_
for more.
To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return true::
def check_origin(self, origin):
return True
To allow connections from any subdomain of your site, you might
do something like::
def check_origin(self, origin):
parsed_origin = urllib.parse.urlparse(origin)
return parsed_origin.netloc.endswith(".mydomain.com")
.. versionadded:: 4.0
"""
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = self.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
def set_nodelay(self, value):
"""Set the no-delay flag for this stream.
By default, small messages may be delayed and/or combined to minimize
the number of packets sent. This can sometimes cause 200-500ms delays
due to the interaction between Nagle's algorithm and TCP delayed
ACKs. To reduce this delay (at the expense of possibly increasing
bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
connection is established.
See `.BaseIOStream.set_nodelay` for additional details.
.. versionadded:: 3.1
"""
self.stream.set_nodelay(value)
def on_connection_close(self):
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
if not self._on_close_called:
self._on_close_called = True
self.on_close()
self._break_cycles()
def _break_cycles(self):
# WebSocketHandlers call finish() early, but we don't want to
# break up reference cycles (which makes it impossible to call
# self.render_string) until after we've really closed the
# connection (if it was established in the first place,
# indicated by status code 101).
if self.get_status() != 101 or self._on_close_called:
super(WebSocketHandler, self)._break_cycles()
def send_error(self, *args, **kwargs):
if self.stream is None:
super(WebSocketHandler, self).send_error(*args, **kwargs)
else:
# If we get an uncaught exception during the handshake,
# we have no choice but to abruptly close the connection.
# TODO: for uncaught exceptions after the handshake,
# we can close the connection more gracefully.
self.stream.close()
def get_websocket_protocol(self):
websocket_version = self.request.headers.get("Sec-WebSocket-Version")
if websocket_version in ("7", "8", "13"):
return WebSocketProtocol13(
self, compression_options=self.get_compression_options())
def _attach_stream(self):
self.stream = self.request.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
# disable non-WS methods
for method in ["write", "redirect", "set_header", "set_cookie",
"set_status", "flush", "finish"]:
setattr(self, method, _raise_not_supported_for_websockets)
def _raise_not_supported_for_websockets(*args, **kwargs):
raise RuntimeError("Method not supported for Web Sockets")
class WebSocketProtocol(object):
"""Base class for WebSocket protocol versions.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.stream = handler.stream
self.client_terminated = False
self.server_terminated = False
def _run_callback(self, callback, *args, **kwargs):
"""Runs the given callback with exception handling.
If the callback is a coroutine, returns its Future. On error, aborts the
websocket connection and returns None.
"""
try:
result = callback(*args, **kwargs)
except Exception:
app_log.error("Uncaught exception in %s",
getattr(self.request, 'path', None), exc_info=True)
self._abort()
else:
if result is not None:
result = gen.convert_yielded(result)
self.stream.io_loop.add_future(result, lambda f: f.result())
return result
def on_connection_close(self):
self._abort()
def _abort(self):
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
class _PerMessageDeflateCompressor(object):
def __init__(self, persistent, max_wbits, compression_options=None):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if compression_options is None or 'compression_level' not in compression_options:
self._compression_level = tornado.web.GZipContentEncoding.GZIP_LEVEL
else:
self._compression_level = compression_options['compression_level']
if compression_options is None or 'mem_level' not in compression_options:
self._mem_level = 8
else:
self._mem_level = compression_options['mem_level']
if persistent:
self._compressor = self._create_compressor()
else:
self._compressor = None
def _create_compressor(self):
return zlib.compressobj(self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level)
def compress(self, data):
compressor = self._compressor or self._create_compressor()
data = (compressor.compress(data) +
compressor.flush(zlib.Z_SYNC_FLUSH))
assert data.endswith(b'\x00\x00\xff\xff')
return data[:-4]
class _PerMessageDeflateDecompressor(object):
def __init__(self, persistent, max_wbits, compression_options=None):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._decompressor = self._create_decompressor()
else:
self._decompressor = None
def _create_decompressor(self):
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data):
decompressor = self._decompressor or self._create_decompressor()
return decompressor.decompress(data + b'\x00\x00\xff\xff')
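# A hedged round-trip sketch of the two classes above: the compressor strips
# the trailing b'\x00\x00\xff\xff' sync-flush block and the decompressor
# appends it back, per the permessage-deflate framing (RFC 7692).
#
# c = _PerMessageDeflateCompressor(persistent=False, max_wbits=None)
# d = _PerMessageDeflateDecompressor(persistent=False, max_wbits=None)
# assert d.decompress(c.compress(b'hello websocket')) == b'hello websocket'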
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0f
def __init__(self, handler, mask_outgoing=False,
compression_options=None):
WebSocketProtocol.__init__(self, handler)
self.mask_outgoing = mask_outgoing
self._final_frame = False
self._frame_opcode = None
self._masked_frame = None
self._frame_mask = None
self._frame_length = None
self._fragmented_message_buffer = None
self._fragmented_message_opcode = None
self._waiting = None
self._compression_options = compression_options
self._decompressor = None
self._compressor = None
self._frame_compressed = None
# The total uncompressed size of all messages received or sent.
# Unicode messages are encoded to utf8.
# Only for testing; subject to change.
self._message_bytes_in = 0
self._message_bytes_out = 0
# The total size of all packets received or sent. Includes
# the effect of compression, frame overhead, and control frames.
self._wire_bytes_in = 0
self._wire_bytes_out = 0
self.ping_callback = None
self.last_ping = 0
self.last_pong = 0
def accept_connection(self):
try:
self._handle_websocket_headers()
except ValueError:
self.handler.set_status(400)
log_msg = "Missing/Invalid WebSocket headers"
self.handler.finish(log_msg)
gen_log.debug(log_msg)
return
try:
self._accept_connection()
except ValueError:
gen_log.debug("Malformed WebSocket request received",
exc_info=True)
self._abort()
return
def _handle_websocket_headers(self):
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: self.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key):
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
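# The accept value is deterministic; the key/accept pair from RFC 6455
# section 1.3 reproduces here:
#
# WebSocketProtocol13.compute_accept_value("dGhlIHNhbXBsZSBub25jZQ==")
# # -> 's3pLbOPSZtQthcWZv1UQJQ=='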
def _challenge_response(self):
return WebSocketProtocol13.compute_accept_value(
self.request.headers.get("Sec-Websocket-Key"))
def _accept_connection(self):
subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
subprotocols = [s.strip() for s in subprotocols.split(',')]
if subprotocols:
selected = self.handler.select_subprotocol(subprotocols)
if selected:
assert selected in subprotocols
self.handler.set_header("Sec-WebSocket-Protocol", selected)
extensions = self._parse_extensions_header(self.request.headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
# TODO: negotiate parameters if compression_options
# specifies limits.
self._create_compressors('server', ext[1], self._compression_options)
if ('client_max_window_bits' in ext[1] and
ext[1]['client_max_window_bits'] is None):
# Don't echo an offered client_max_window_bits
# parameter with no value.
del ext[1]['client_max_window_bits']
self.handler.set_header("Sec-WebSocket-Extensions",
httputil._encode_header(
'permessage-deflate', ext[1]))
break
self.handler.clear_header("Content-Type")
self.handler.set_status(101)
self.handler.set_header("Upgrade", "websocket")
self.handler.set_header("Connection", "Upgrade")
self.handler.set_header("Sec-WebSocket-Accept", self._challenge_response())
self.handler.finish()
self.handler._attach_stream()
self.stream = self.handler.stream
self.start_pinging()
self._run_callback(self.handler.open, *self.handler.open_args,
**self.handler.open_kwargs)
self._receive_frame()
def _parse_extensions_header(self, headers):
extensions = headers.get("Sec-WebSocket-Extensions", '')
if extensions:
return [httputil._parse_header(e.strip())
for e in extensions.split(',')]
return []
def _process_server_headers(self, key, headers):
"""Process the headers sent by the server to this client connection.
'key' is the websocket handshake challenge/response key.
"""
assert headers['Upgrade'].lower() == 'websocket'
assert headers['Connection'].lower() == 'upgrade'
accept = self.compute_accept_value(key)
assert headers['Sec-Websocket-Accept'] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
self._create_compressors('client', ext[1])
else:
raise ValueError("unsupported extension %r", ext)
def _get_compressor_options(self, side, agreed_parameters, compression_options=None):
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + '_no_context_takeover') not in agreed_parameters)
wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
if wbits_header is None:
options['max_wbits'] = zlib.MAX_WBITS
else:
options['max_wbits'] = int(wbits_header)
options['compression_options'] = compression_options
return options
def _create_compressors(self, side, agreed_parameters, compression_options=None):
# TODO: handle invalid parameters gracefully
allowed_keys = set(['server_no_context_takeover',
'client_no_context_takeover',
'server_max_window_bits',
'client_max_window_bits'])
for key in agreed_parameters:
if key not in allowed_keys:
raise ValueError("unsupported compression parameter %r" % key)
other_side = 'client' if (side == 'server') else 'server'
self._compressor = _PerMessageDeflateCompressor(
**self._get_compressor_options(side, agreed_parameters, compression_options))
self._decompressor = _PerMessageDeflateDecompressor(
**self._get_compressor_options(other_side, agreed_parameters, compression_options))
def _write_frame(self, fin, opcode, data, flags=0):
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
l = len(data)
if self.mask_outgoing:
mask_bit = 0x80
else:
mask_bit = 0
if l < 126:
frame += struct.pack("B", l | mask_bit)
elif l <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, l)
else:
frame += struct.pack("!BQ", 127 | mask_bit, l)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
return self.stream.write(frame)
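# Worked sketch of the length encoding above: payloads under 126 bytes fit in
# the second header byte; 126..0xFFFF add a 16-bit length; larger add 64 bits.
#
# struct.pack("B", 125)           # b'}' -- single-byte length
# struct.pack("!BH", 126, 60000)  # 126 marker + 16-bit extended length
# struct.pack("!BQ", 127, 2**20)  # 127 marker + 64-bit extended length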
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado.escape.utf8(message)
assert isinstance(message, bytes)
self._message_bytes_out += len(message)
flags = 0
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
return self._write_frame(True, opcode, message, flags=flags)
def write_ping(self, data):
"""Send ping frame."""
assert isinstance(data, bytes)
self._write_frame(True, 0x9, data)
def _receive_frame(self):
try:
self.stream.read_bytes(2, self._on_frame_start)
except StreamClosedError:
self._abort()
def _on_frame_start(self, data):
self._wire_bytes_in += len(data)
header, payloadlen = struct.unpack("BB", data)
self._final_frame = header & self.FIN
reserved_bits = header & self.RSV_MASK
self._frame_opcode = header & self.OPCODE_MASK
self._frame_opcode_is_control = self._frame_opcode & 0x8
if self._decompressor is not None and self._frame_opcode != 0:
self._frame_compressed = bool(reserved_bits & self.RSV1)
reserved_bits &= ~self.RSV1
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
self._masked_frame = bool(payloadlen & 0x80)
payloadlen = payloadlen & 0x7f
if self._frame_opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
try:
if payloadlen < 126:
self._frame_length = payloadlen
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self._read_frame_data(False)
elif payloadlen == 126:
self.stream.read_bytes(2, self._on_frame_length_16)
elif payloadlen == 127:
self.stream.read_bytes(8, self._on_frame_length_64)
except StreamClosedError:
self._abort()
def _read_frame_data(self, masked):
new_len = self._frame_length
if self._fragmented_message_buffer is not None:
new_len += len(self._fragmented_message_buffer)
if new_len > (self.handler.max_message_size or 10 * 1024 * 1024):
self.close(1009, "message too big")
return
self.stream.read_bytes(
self._frame_length,
self._on_masked_frame_data if masked else self._on_frame_data)
def _on_frame_length_16(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!H", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self._read_frame_data(False)
except StreamClosedError:
self._abort()
def _on_frame_length_64(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!Q", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self._read_frame_data(False)
except StreamClosedError:
self._abort()
def _on_masking_key(self, data):
self._wire_bytes_in += len(data)
self._frame_mask = data
try:
self._read_frame_data(True)
except StreamClosedError:
self._abort()
def _on_masked_frame_data(self, data):
# Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
self._on_frame_data(_websocket_mask(self._frame_mask, data))
def _on_frame_data(self, data):
handled_future = None
self._wire_bytes_in += len(data)
if self._frame_opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not self._final_frame:
# control frames must not be fragmented
self._abort()
return
opcode = self._frame_opcode
elif self._frame_opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += data
if self._final_frame:
opcode = self._fragmented_message_opcode
data = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if self._final_frame:
opcode = self._frame_opcode
else:
self._fragmented_message_opcode = self._frame_opcode
self._fragmented_message_buffer = data
if self._final_frame:
handled_future = self._handle_message(opcode, data)
if not self.client_terminated:
if handled_future:
# on_message is a coroutine, process more frames once it's done.
handled_future.add_done_callback(
lambda future: self._receive_frame())
else:
self._receive_frame()
def _handle_message(self, opcode, data):
"""Execute on_message, returning its Future if it is a coroutine."""
if self.client_terminated:
return
if self._frame_compressed:
data = self._decompressor.decompress(data)
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return
return self._run_callback(self.handler.on_message, decoded)
elif opcode == 0x2:
# Binary data
self._message_bytes_in += len(data)
return self._run_callback(self.handler.on_message, data)
elif opcode == 0x8:
# Close
self.client_terminated = True
if len(data) >= 2:
self.handler.close_code = struct.unpack('>H', data[:2])[0]
if len(data) > 2:
self.handler.close_reason = to_unicode(data[2:])
# Echo the received close code, if any (RFC 6455 section 5.5.1).
self.close(self.handler.close_code)
elif opcode == 0x9:
# Ping
try:
self._write_frame(True, 0xA, data)
except StreamClosedError:
self._abort()
self._run_callback(self.handler.on_ping, data)
elif opcode == 0xA:
# Pong
self.last_pong = IOLoop.current().time()
return self._run_callback(self.handler.on_pong, data)
else:
self._abort()
def close(self, code=None, reason=None):
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
if code is None and reason is not None:
code = 1000 # "normal closure" status code
if code is None:
close_data = b''
else:
close_data = struct.pack('>H', code)
if reason is not None:
close_data += utf8(reason)
try:
self._write_frame(True, 0x8, close_data)
except StreamClosedError:
self._abort()
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
self.stream.io_loop.time() + 5, self._abort)
@property
def ping_interval(self):
interval = self.handler.ping_interval
if interval is not None:
return interval
return 0
@property
def ping_timeout(self):
timeout = self.handler.ping_timeout
if timeout is not None:
return timeout
return max(3 * self.ping_interval, 30)
def start_pinging(self):
"""Start sending periodic pings to keep the connection alive"""
if self.ping_interval > 0:
self.last_ping = self.last_pong = IOLoop.current().time()
self.ping_callback = PeriodicCallback(
self.periodic_ping, self.ping_interval * 1000)
self.ping_callback.start()
def periodic_ping(self):
"""Send a ping to keep the websocket alive
Called periodically if the websocket_ping_interval is set and non-zero.
"""
if self.stream.closed() and self.ping_callback is not None:
self.ping_callback.stop()
return
# Check for timeout on pong. Make sure that we really have
# sent a recent ping in case the machine with both server and
# client has been suspended since the last ping.
now = IOLoop.current().time()
since_last_pong = now - self.last_pong
since_last_ping = now - self.last_ping
if (since_last_ping < 2 * self.ping_interval and
since_last_pong > self.ping_timeout):
self.close()
return
self.write_ping(b'')
self.last_ping = now
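# Editor's note -- worked example of the timeout check above (illustrative
# numbers, not from this module): with ping_interval=10 and ping_timeout=30,
# the connection is closed only when the last pong is more than 30s old AND
# a ping was sent within the last 20s (2 * ping_interval); the second
# condition avoids spurious timeouts right after a suspended machine
# resumes, before a fresh ping has had a chance to be answered.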
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
"""WebSocket client connection.
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
def __init__(self, request, on_message_callback=None,
compression_options=None, ping_interval=None, ping_timeout=None,
max_message_size=None):
self.compression_options = compression_options
self.connect_future = Future()
self.protocol = None
self.read_future = None
self.read_queue = collections.deque()
self.key = base64.b64encode(os.urandom(16))
self._on_message_callback = on_message_callback
self.close_code = self.close_reason = None
self.ping_interval = ping_interval
self.ping_timeout = ping_timeout
self.max_message_size = max_message_size
scheme, sep, rest = request.url.partition(':')
scheme = {'ws': 'http', 'wss': 'https'}[scheme]
request.url = scheme + sep + rest
request.headers.update({
'Upgrade': 'websocket',
'Connection': 'Upgrade',
'Sec-WebSocket-Key': self.key,
'Sec-WebSocket-Version': '13',
})
if self.compression_options is not None:
# Always offer to let the server set our max_wbits (and even though
# we don't offer it, we will accept a client_no_context_takeover
# from the server).
# TODO: set server parameters for deflate extension
# if requested in self.compression_options.
request.headers['Sec-WebSocket-Extensions'] = (
'permessage-deflate; client_max_window_bits')
self.tcp_client = TCPClient()
super(WebSocketClientConnection, self).__init__(
None, request, lambda: None, self._on_http_response,
104857600, self.tcp_client, 65536, 104857600)
def close(self, code=None, reason=None):
"""Closes the websocket connection.
``code`` and ``reason`` are documented under
`WebSocketHandler.close`.
.. versionadded:: 3.2
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.protocol is not None:
self.protocol.close(code, reason)
self.protocol = None
def on_connection_close(self):
if not self.connect_future.done():
self.connect_future.set_exception(StreamClosedError())
self.on_message(None)
self.tcp_client.close()
super(WebSocketClientConnection, self).on_connection_close()
def _on_http_response(self, response):
if not self.connect_future.done():
if response.error:
self.connect_future.set_exception(response.error)
else:
self.connect_future.set_exception(WebSocketError(
"Non-websocket response"))
def headers_received(self, start_line, headers):
if start_line.code != 101:
return super(WebSocketClientConnection, self).headers_received(
start_line, headers)
self.headers = headers
self.protocol = self.get_websocket_protocol()
self.protocol._process_server_headers(self.key, self.headers)
self.protocol.start_pinging()
self.protocol._receive_frame()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
self.stream = self.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
# Once we've taken over the connection, clear the final callback
# we set on the http request. This deactivates the error handling
# in simple_httpclient that would otherwise interfere with our
# ability to see exceptions.
self.final_callback = None
self.connect_future.set_result(self)
def write_message(self, message, binary=False):
"""Sends a message to the WebSocket server."""
return self.protocol.write_message(message, binary)
def read_message(self, callback=None):
"""Reads a message from the WebSocket server.
If on_message_callback was specified at WebSocket
initialization, this function will never return messages.
Returns a future whose result is the message, or None
if the connection is closed. If a callback argument
is given, it will be called with the future when it is
ready.
"""
assert self.read_future is None
future = Future()
if self.read_queue:
future.set_result(self.read_queue.popleft())
else:
self.read_future = future
if callback is not None:
self.io_loop.add_future(future, callback)
return future
def on_message(self, message):
if self._on_message_callback:
self._on_message_callback(message)
elif self.read_future is not None:
self.read_future.set_result(message)
self.read_future = None
else:
self.read_queue.append(message)
def on_pong(self, data):
pass
def on_ping(self, data):
pass
def get_websocket_protocol(self):
return WebSocketProtocol13(self, mask_outgoing=True,
compression_options=self.compression_options)
def websocket_connect(url, callback=None, connect_timeout=None,
on_message_callback=None, compression_options=None,
ping_interval=None, ping_timeout=None,
max_message_size=None):
"""Client-side websocket support.
Takes a url and returns a Future whose result is a
`WebSocketClientConnection`.
``compression_options`` is interpreted in the same way as the
return value of `.WebSocketHandler.get_compression_options`.
The connection supports two styles of operation. In the coroutine
style, the application typically calls
`~.WebSocketClientConnection.read_message` in a loop::
conn = yield websocket_connect(url)
while True:
msg = yield conn.read_message()
if msg is None: break
# Do something with msg
In the callback style, pass an ``on_message_callback`` to
``websocket_connect``. In both styles, a message of ``None``
indicates that the connection has been closed.
.. versionchanged:: 3.2
Also accepts ``HTTPRequest`` objects in place of urls.
.. versionchanged:: 4.1
Added ``compression_options`` and ``on_message_callback``.
.. versionchanged:: 4.5
Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
arguments, which have the same meaning as in `WebSocketHandler`.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
# Copy and convert the headers dict/object (see comments in
# AsyncHTTPClient.fetch)
request.headers = httputil.HTTPHeaders(request.headers)
else:
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = httpclient._RequestProxy(
request, httpclient.HTTPRequest._DEFAULTS)
conn = WebSocketClientConnection(request,
on_message_callback=on_message_callback,
compression_options=compression_options,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
max_message_size=max_message_size)
if callback is not None:
IOLoop.current().add_future(conn.connect_future, callback)
return conn.connect_future
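# --- Editor's sketch (not part of tornado): callback-style client. ---
# A minimal illustration of the second style described in the docstring
# above; the URL and handler names below are hypothetical.
def _example_callback_style_client(url):
    """Connect with on_message_callback instead of looping on read_message().
    A message of None signals that the connection was closed."""
    def on_message(msg):
        if msg is None:
            print("connection closed")
        else:
            print("received: %r" % (msg,))
    # Returns a Future resolving to the WebSocketClientConnection.
    return websocket_connect(url, on_message_callback=on_message)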
|
legnaleurc/tornado
|
tornado/websocket.py
|
Python
|
apache-2.0
| 47,962
|
[
"VisIt"
] |
a6b451f090dbf3042f55b1907f513e80d84872d482dd584ad46e869e8c7a05be
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with utility functions for use in input files."""
from __future__ import division
import re
import sys
import os
import math
import numpy as np
from .exceptions import *
def oeprop(wfn, *args, **kwargs):
"""Evaluate one-electron properties.
:returns: None
:type wfn: :py:class:`~psi4.core.Wavefunction`
:param wfn: set of molecule, basis, orbitals from which to compute properties
The properties to compute are given as string *args* (e.g., 'DIPOLE', 'QUADRUPOLE').
:type title: string
:param title: label prepended to all psivars computed
:examples:
>>> # [1] Moments with specific label
>>> E, wfn = energy('hf', return_wfn=True)
>>> oeprop(wfn, 'DIPOLE', 'QUADRUPOLE', title='H3O+ SCF')
"""
oe = core.OEProp(wfn)
if 'title' in kwargs:
oe.set_title(kwargs['title'])
for prop in args:
oe.add(prop)
oe.compute()
def cubeprop(wfn, **kwargs):
"""Evaluate properties on a grid and generate cube files.
.. versionadded:: 0.5
*wfn* parameter passed explicitly
:returns: None
:type wfn: :py:class:`~psi4.core.Wavefunction`
:param wfn: set of molecule, basis, orbitals from which to generate cube files
:examples:
>>> # [1] Cube files for all orbitals
>>> E, wfn = energy('b3lyp', return_wfn=True)
>>> cubeprop(wfn)
>>> # [2] Cube files for density (alpha, beta, total, spin) and four orbitals
>>> # (two alpha, two beta)
>>> set cubeprop_tasks ['orbitals', 'density']
>>> set cubeprop_orbitals [5, 6, -5, -6]
>>> E, wfn = energy('scf', return_wfn=True)
>>> cubeprop(wfn)
"""
# By default compute the orbitals
if not core.has_global_option_changed('CUBEPROP_TASKS'):
core.set_global_option('CUBEPROP_TASKS',['ORBITALS'])
if ((core.get_global_option('INTEGRAL_PACKAGE') == 'ERD') and
('ESP' in core.get_global_option('CUBEPROP_TASKS'))):
raise ValidationError('INTEGRAL_PACKAGE ERD does not play nicely with electrostatic potential, so stopping.')
cp = core.CubeProperties(wfn)
cp.compute_properties()
def set_memory(inputval, execute=True):
"""Function to reset the total memory allocation. Takes memory value
*inputval* as type int, float, or str; int and float are taken literally
as bytes to be set, string taken as a unit-containing value (e.g., 30 mb)
which is case-insensitive. Set *execute* to False to interpret *inputval*
without setting in Psi4 core.
:returns: *memory_amount* (int) Number of bytes of memory set
:raises: ValidationError when below the 250 MiB minimum, of a disallowed type, or misformatted
:examples:
>>> # [1] Passing absolute number of bytes
>>> psi4.set_memory(600000000)
>>> psi4.get_memory()
Out[1]: 600000000L
>>> # [2] Passing memory value as string with units
>>> psi4.set_memory('30 GB')
>>> psi4.get_memory()
Out[2]: 30000000000L
:good examples:
800000000 # 800000000
2004088624.9 # 2004088624
1.0e9 # 1000000000
'600 mb' # 600000000
'600.0 MiB' # 629145600
'.6 Gb' # 600000000
' 100000000kB ' # 100000000000
'2 eb' # 2000000000000000000
:bad examples:
{} # odd type
'' # no info
"8 dimms" # unacceptable units
"1e5 gb" # string w/ exponent
"5e5" # string w/o units
2000 # mem too small
-5e5 # negative (and too small)
"""
# Handle memory given in bytes directly (int or float)
if isinstance(inputval, (int, float)):
val = inputval
units = ''
# Handle memory given as a string
elif isinstance(inputval, str):
memory_string = re.compile(r'^\s*(\d*\.?\d+)\s*([KMGTPBE]i?B)\s*$', re.IGNORECASE)
matchobj = re.search(memory_string, inputval)
if matchobj:
val = float(matchobj.group(1))
units = matchobj.group(2)
else:
raise ValidationError("""Invalid memory specification: {}. Try 5e9 or '5 gb'.""".format(repr(inputval)))
else:
raise ValidationError("""Invalid type {} in memory specification: {}. Try 5e9 or '5 gb'.""".format(
type(inputval), repr(inputval)))
# Units decimal or binary?
multiplier = 1000
if "i" in units.lower():
multiplier = 1024
units = units.lower().replace("i", "").upper()
# Build conversion factor, convert units
unit_list = ["", "KB", "MB", "GB", "TB", "PB", "EB"]
mult = 1
for unit in unit_list:
if units.upper() == unit:
break
mult *= multiplier
memory_amount = int(val * mult)
# Check minimum memory requirement
min_mem_allowed = 262144000
if memory_amount < min_mem_allowed:
raise ValidationError("""set_memory(): Requested {:.3} MiB ({:.3} MB); minimum 250 MiB (263 MB). Please, sir, I want some more.""".format(
memory_amount / 1024 ** 2, memory_amount / 1000 ** 2))
if execute:
core.set_memory_bytes(memory_amount)
return memory_amount
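# Editor's note -- worked example of the conversion above: set_memory('600.0 MiB')
# matches val=600.0, units='MiB'; the 'i' selects the binary multiplier (1024)
# and the units normalize to 'MB', so mult = 1024**2 and the function returns
# int(600.0 * 1048576) = 629145600 bytes, matching the docstring example.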
def get_memory():
"""Function to return the total memory allocation."""
return core.get_memory()
def success(label):
"""Function to print a '*label*...PASSED' line to screen.
Used by :py:func:`util.compare_values` family when functions pass.
"""
msg = '\t{0:.<66}PASSED'.format(label)
print(msg)
sys.stdout.flush()
core.print_out(msg + '\n')
# Test functions
def compare_values(expected, computed, digits, label, exitonfail=True):
"""Function to compare two values. Prints :py:func:`util.success`
when value *computed* matches value *expected* to number of *digits*
(or to *digits* itself when *digits* < 1 e.g. digits=0.04). Performs
a system exit on failure unless *exitonfail* False, in which case
returns error message. Used in input files in the test suite.
"""
if digits > 1:
thresh = 10 ** -digits
message = ("\t%s: computed value (%.*f) does not match (%.*f) to %d digits." % (label, digits+1, computed, digits+1, expected, digits))
else:
thresh = digits
message = ("\t%s: computed value (%f) does not match (%f) to %f digits." % (label, computed, expected, digits))
if abs(expected - computed) > thresh:
print(message)
if exitonfail:
raise TestComparisonError(message)
if math.isnan(computed):
print(message)
print("\tprobably because the computed value is nan.")
if exitonfail:
raise TestComparisonError(message)
success(label)
return True
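# Editor's usage sketch (illustrative values; scf_e is hypothetical): a test
# input might contain
#   compare_values(-76.0266327341, scf_e, 6, 'SCF energy')
# which passes when |expected - computed| <= 1e-6 and raises
# TestComparisonError (or prints the failure message) otherwise.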
def compare_integers(expected, computed, label):
"""Function to compare two integers. Prints :py:func:`util.success`
when value *computed* matches value *expected*.
Performs a system exit on failure. Used in input files in the test suite.
"""
if (expected != computed):
message = ("\t%s: computed value (%d) does not match (%d)." % (label, computed, expected))
raise TestComparisonError(message)
success(label)
return True
def compare_strings(expected, computed, label):
"""Function to compare two strings. Prints :py:func:`util.success`
when string *computed* exactly matches string *expected*.
Performs a system exit on failure. Used in input files in the test suite.
"""
if(expected != computed):
message = ("\t%s: computed value (%s) does not match (%s)." % (label, computed, expected))
raise TestComparisonError(message)
success(label)
return True
def compare_matrices(expected, computed, digits, label):
"""Function to compare two matrices. Prints :py:func:`util.success`
when elements of matrix *computed* match elements of matrix *expected* to
number of *digits*. Performs a system exit on failure to match symmetry
structure, dimensions, or element values. Used in input files in the test suite.
"""
if (expected.nirrep() != computed.nirrep()):
message = ("\t%s has %d irreps, but %s has %d\n." % (expected.name(), expected.nirrep(), computed.name(), computed.nirrep()))
raise TestComparisonError(message)
if (expected.symmetry() != computed.symmetry()):
message = ("\t%s has %d symmetry, but %s has %d\n." % (expected.name(), expected.symmetry(), computed.name(), computed.symmetry()))
raise TestComparisonError(message)
nirreps = expected.nirrep()
symmetry = expected.symmetry()
for irrep in range(nirreps):
if(expected.rows(irrep) != computed.rows(irrep)):
message = ("\t%s has %d rows in irrep %d, but %s has %d\n." % (expected.name(), expected.rows(irrep), irrep, computed.name(), computed.rows(irrep)))
raise TestComparisonError(message)
if(expected.cols(irrep ^ symmetry) != computed.cols(irrep ^ symmetry)):
message = ("\t%s has %d columns in irrep %d, but %s has %d\n." % (expected.name(), expected.cols(irrep ^ symmetry), irrep, computed.name(), computed.cols(irrep ^ symmetry)))
raise TestComparisonError(message)
rows = expected.rows(irrep)
cols = expected.cols(irrep ^ symmetry)
failed = 0
for row in range(rows):
for col in range(cols):
if(abs(expected.get(irrep, row, col) - computed.get(irrep, row, col)) > 10 ** (-digits)):
print("\t%s: computed value (%s) does not match (%s)." % (label, computed.get(irrep, row, col), expected.get(irrep, row, col)))
failed = 1
break
if(failed):
print("Check your output file for reporting of the matrices.")
core.print_out("The Failed Test Matrices\n")
core.print_out("Computed Matrix (2nd matrix passed in)\n")
computed.print_out()
core.print_out("Expected Matrix (1st matrix passed in)\n")
expected.print_out()
raise TestComparisonError("\n")
success(label)
return True
def compare_vectors(expected, computed, digits, label):
"""Function to compare two vectors. Prints :py:func:`util.success`
when elements of vector *computed* match elements of vector *expected* to
number of *digits*. Performs a system exit on failure to match symmetry
structure, dimension, or element values. Used in input files in the test suite.
"""
if (expected.nirrep() != computed.nirrep()):
message = ("\t%s has %d irreps, but %s has %d\n." % (expected.name(), expected.nirrep(), computed.name(), computed.nirrep()))
raise TestComparisonError(message)
nirreps = expected.nirrep()
for irrep in range(nirreps):
if(expected.dim(irrep) != computed.dim(irrep)):
message = ("\tThe reference has %d entries in irrep %d, but the computed vector has %d\n." % (expected.dim(irrep), irrep, computed.dim(irrep)))
raise TestComparisonError(message)
dim = expected.dim(irrep)
failed = 0
for entry in range(dim):
if(abs(expected.get(irrep, entry) - computed.get(irrep, entry)) > 10 ** (-digits)):
failed = 1
break
if(failed):
core.print_out("The computed vector\n")
computed.print_out()
core.print_out("The reference vector\n")
expected.print_out()
message = ("\t%s: computed value (%s) does not match (%s)." % (label, computed.get(irrep, entry), expected.get(irrep, entry)))
raise TestComparisonError(message)
success(label)
return True
def compare_arrays(expected, computed, digits, label):
"""Function to compare two numpy arrays. Prints :py:func:`util.success`
when elements of vector *computed* match elements of vector *expected* to
number of *digits*. Performs a system exit on failure to match symmetry
structure, dimension, or element values. Used in input files in the test suite.
"""
try:
shape1 = expected.shape
shape2 = computed.shape
except AttributeError:
raise TestComparisonError("Input objects do not have a shape attribute.")
if shape1 != shape2:
raise TestComparisonError("Input shapes do not match.")
tol = 10 ** (-digits)
if not np.allclose(expected, computed, atol=tol):
message = "\tArray difference norm is %12.6f." % np.linalg.norm(expected - computed)
raise TestComparisonError(message)
success(label)
return True
def compare_cubes(expected, computed, label):
"""Function to compare two cube files. Prints :py:func:`util.success`
when value *computed* matches value *expected*.
Performs a system exit on failure. Used in input files in the test suite.
"""
# Skip the first six elements, which are just labels
evec = [float(k) for k in expected.split()[6:]]
cvec = [float(k) for k in computed.split()[6:]]
if len(evec) == len(cvec):
for n in range(len(evec)):
if (math.fabs(evec[n]-cvec[n]) > 1.0e-4):
message = ("\t%s: computed cube file does not match expected cube file." % label)
raise TestComparisonError(message)
else:
message = ("\t%s: computed cube file does not match expected cube file (lengths differ)." % label)
raise TestComparisonError(message)
success(label)
return True
def copy_file_to_scratch(filename, prefix, namespace, unit, move = False):
"""Function to move file into scratch with correct naming
convention.
Arguments:
@arg filename full path to file
@arg prefix computation prefix, usually 'psi'
@arg namespace context namespace, usually molecule name
@arg unit unit number, e.g. 32
@arg move copy or move? (default copy)
Example:
Assume PID is 12345 and SCRATCH is /scratch/parrish/
copy_file_to_scratch('temp', 'psi', 'h2o', 32):
-cp ./temp /scratch/parrish/psi.12345.h2o.32
copy_file_to_scratch('/tmp/temp', 'psi', 'h2o', 32):
-cp /tmp/temp /scratch/parrish/psi.12345.h2o.32
copy_file_to_scratch('/tmp/temp', 'psi', '', 32):
-cp /tmp/temp /scratch/parrish/psi.12345.32
copy_file_to_scratch('/tmp/temp', 'psi', '', 32, True):
-mv /tmp/temp /scratch/parrish/psi.12345.32
"""
pid = str(os.getpid())
scratch = core.IOManager.shared_object().get_file_path(int(unit))
cp = '/bin/cp'
if move:
cp = '/bin/mv'
unit = str(unit)
target = ''
target += prefix
target += '.'
target += pid
if len(namespace):
target += '.'
target += namespace
target += '.'
target += unit
command = ('%s %s %s/%s' % (cp, filename, scratch, target))
os.system(command)
#print command
def copy_file_from_scratch(filename, prefix, namespace, unit, move = False):
"""Function to move file out of scratch with correct naming
convention.
Arguments:
@arg filename full path to target file
@arg prefix computation prefix, usually 'psi'
@arg namespace context namespace, usually molecule name
@arg unit unit number, e.g. 32
@arg move copy or move? (default copy)
Example:
Assume PID is 12345 and SCRATCH is /scratch/parrish/
copy_file_from_scratch('temp', 'psi', 'h2o', 32):
-cp /scratch/parrish/psi.12345.h2o.32 temp
copy_file_from_scratch('/tmp/temp', 'psi', 'h2o', 32):
-cp /scratch/parrish/psi.12345.h2o.32 /tmp/temp
copy_file_from_scratch('/tmp/temp', 'psi', '', 32):
-cp /scratch/parrish/psi.12345.32 /tmp/temp
copy_file_from_scratch('/tmp/temp', 'psi', '', 32, True):
-mv /scratch/parrish/psi.12345.32 /tmp/temp
"""
pid = str(os.getpid())
scratch = core.IOManager.shared_object().get_file_path(int(unit))
cp = '/bin/cp'
if move:
cp = '/bin/mv'
unit = str(unit)
target = ''
target += prefix
target += '.'
target += pid
if len(namespace):
target += '.'
target += namespace
target += '.'
target += unit
command = ('%s %s/%s %s' % (cp, scratch, target, filename))
os.system(command)
def xml2dict(filename=None):
"""Read XML *filename* into nested OrderedDict-s. *filename* defaults to
active CSX file.
"""
import xmltodict as xd
if filename is None:
csx = os.path.splitext(core.outfile_name())[0] + '.csx'
else:
csx = filename
with open(csx, 'r') as handle:
csxdict = xd.parse(handle)
return csxdict
def getFromDict(dataDict, mapList):
"""Fetch a nested value by key path, e.g., getFromDict(d, ['a', 'b']) == d['a']['b']."""
from functools import reduce  # built-in on Python 2; the import is needed on Python 3
return reduce(lambda d, k: d[k], mapList, dataDict)
def csx2endict():
"""Grabs the CSX file as a dictionary, encodes translation of PSI variables
to XML blocks, gathers all available energies from CSX file into returned
dictionary.
"""
blockprefix = ['chemicalSemantics', 'molecularCalculation', 'quantumMechanics', 'singleReferenceState', 'singleDeterminant']
blockmidfix = ['energies', 'energy']
prefix = 'cs:'
pv2xml = {
'MP2 CORRELATION ENERGY': [['mp2'], 'correlation'],
'MP2 SAME-SPIN CORRELATION ENERGY': [['mp2'], 'sameSpin correlation'],
'HF TOTAL ENERGY': [['abinitioScf'], 'electronic'],
'NUCLEAR REPULSION ENERGY': [['abinitioScf'], 'nuclearRepulsion'],
'DFT FUNCTIONAL TOTAL ENERGY': [['dft'], 'dftFunctional'],
'DFT TOTAL ENERGY': [['dft'], 'electronic'],
'DOUBLE-HYBRID CORRECTION ENERGY': [['dft'], 'doubleHybrid correction'],
'DISPERSION CORRECTION ENERGY': [['dft'], 'dispersion correction'],
}
csxdict = xml2dict()
enedict = {}
for pv, lpv in pv2xml.items():
address = blockprefix + lpv[0] + blockmidfix
indices = [prefix + bit for bit in address]
try:
qwer = getFromDict(csxdict, indices)
except KeyError:
continue
for v in qwer:
vv = list(v.values())  # list() so the indexing below also works on Python 3
if vv[0] == prefix + lpv[1]:
enedict[pv] = float(vv[1])
return enedict
def compare_csx():
"""Function to validate energies in CSX files against PSIvariables. Only
active if write_csx flag on.
"""
if 'csx4psi' in sys.modules.keys():
if core.get_global_option('WRITE_CSX'):
enedict = csx2endict()
compare_integers(len(enedict) >= 2, True, 'CSX harvested')
for pv, en in enedict.items():
compare_values(core.get_variable(pv), en, 6, 'CSX ' + pv + ' ' + str(round(en, 4)))
|
kratman/psi4public
|
psi4/driver/p4util/util.py
|
Python
|
gpl-2.0
| 19,422
|
[
"Psi4"
] |
b4bbf78ab44ee5aed217566d7191cb8f322419794391bdbe2524ac963aea8144
|
"""Save pairwise RMSD, TFDs, and E3FP TCs between conformer pairs.
Authors: Seth Axen
E-mail: seth.axen@gmail.com
"""
import argparse
import numpy as np
import rdkit.Chem
from rdkit.Chem import rdMolAlign, TorsionFingerprints
from e3fp.pipeline import params_to_dicts, fprints_from_mol
from e3fp_paper.pipeline import load_params
from e3fp.fingerprint.metrics import tanimoto
_, FPRINT_PARAMS = params_to_dicts(load_params())
LOG_FREQ = 20 # min number of mol pairs between saves
def get_rmsd(mol):
rms, tmat = rdMolAlign.GetAlignmentTransform(mol, mol, prbCid=0, refCid=1)
return rms
def get_tfd(mol):
return TorsionFingerprints.GetTFDBetweenConformers(mol, [0], [1])
def get_e3fp_tc(mol):
fprints = fprints_from_mol(mol, fprint_params=FPRINT_PARAMS)
return tanimoto(fprints[0], fprints[1])
def main(sdf_file, tfds_file='tfds.bin', rmsds_file='rmsds.bin',
e3fp_tcs_file='e3fp_tcs.bin', log_freq=LOG_FREQ):
rmsds = []
tfds = []
e3fp_tcs = []
i = 0
supp = rdkit.Chem.SDMolSupplier(sdf_file)
while True:
try:
mol = next(supp)
tmp_mol = next(supp)
except StopIteration:
break
mol.AddConformer(tmp_mol.GetConformer(0), assignId=True)
rmsds.append(get_rmsd(mol))
tfds.append(get_tfd(mol))
e3fp_tcs.append(get_e3fp_tc(mol))
i += 1
if i > 0 and i % log_freq == 0:
print(i)
np.asarray(rmsds, dtype=np.double).tofile(rmsds_file, format="d")
np.asarray(tfds, dtype=np.double).tofile(tfds_file, format="d")
np.asarray(e3fp_tcs, dtype=np.double).tofile(e3fp_tcs_file, format="d")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Save pairwise RMSD, TFDs, and E3FP TCs between conformer pairs.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('sdf_file', type=str,
help="""Path to SDF file containing mols.""")
parser.add_argument('--rmsds_file', type=str, default='rmsds.bin',
help="""Path to output file with RMSDs.""")
parser.add_argument('--tfds_file', type=str, default='tfds.bin',
help="""Path to output file with Torsion Fingerprint
Deviations (TFDs).""")
parser.add_argument('--e3fp_tcs_file', type=str, default='e3fp_tcs.bin',
help="""Path to output file with E3FP TCs.""")
params = parser.parse_args()
main(params.sdf_file, rmsds_file=params.rmsds_file,
tfds_file=params.tfds_file, e3fp_tcs_file=params.e3fp_tcs_file)
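# Editor's usage sketch (hypothetical file names):
#   python get_pair_rmsds_tfds_tcs.py conformer_pairs.sdf \
#       --rmsds_file rmsds.bin --tfds_file tfds.bin --e3fp_tcs_file e3fp_tcs.bin
# The SDF is read two molecules at a time, treating each consecutive pair as
# two conformers of one molecule. Each output file is a raw array of doubles;
# read it back with, e.g., np.fromfile('rmsds.bin', dtype=np.double).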
|
keiserlab/e3fp-paper
|
project/fingerprint_comparison/random_conformer_pairs/get_pair_rmsds_tfds_tcs.py
|
Python
|
lgpl-3.0
| 2,568
|
[
"RDKit"
] |
6faee5375c576a7c1d8ac31b0a08bd8254faffd8c5e74c8bffbdd144af7d1dc2
|
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import os, shutil, zipfile, re, time, sys, datetime, platform
from os.path import join, exists, dirname, isdir
from argparse import ArgumentParser, REMAINDER
import StringIO
import xml.dom.minidom
import subprocess
import mx
import mx_gate
import mx_unittest
from mx_gate import Task
from mx_unittest import unittest
_suite = mx.suite('jvmci')
JVMCI_VERSION = 9
"""
Top level directory of the JDK source workspace.
"""
_jdkSourceRoot = dirname(_suite.dir)
_JVMCI_JDK_TAG = 'jvmci'
_minVersion = mx.VersionSpec('1.9')
# max version (first _unsupported_ version)
_untilVersion = None
_jvmciModes = {
'hosted' : ['-XX:+UnlockExperimentalVMOptions', '-XX:+EnableJVMCI'],
'jit' : ['-XX:+UnlockExperimentalVMOptions', '-XX:+EnableJVMCI', '-XX:+UseJVMCICompiler'],
'disabled' : []
}
# TODO: can optimized be built without overriding release build?
_jdkDebugLevels = ['release', 'fastdebug', 'slowdebug']
# TODO: add client once/if it can be built on 64-bit platforms
_jdkJvmVariants = ['server']
"""
Translation table from mx_jvmci:8 --vmbuild values to mx_jvmci:9 --jdk-debug-level values.
"""
_legacyVmbuilds = {
'product' : 'release',
'debug' : 'slowdebug'
}
"""
Translates a mx_jvmci:8 --vmbuild value to a mx_jvmci:9 --jdk-debug-level value.
"""
def _translateLegacyDebugLevel(debugLevel):
return _legacyVmbuilds.get(debugLevel, debugLevel)
"""
Translation table from mx_jvmci:8 --vm values to mx_jvmci:9 (--jdk-jvm-variant, --jvmci-mode) tuples.
"""
_legacyVms = {
'jvmci' : ('server', 'jit')
}
"""
A VM configuration composed of a JDK debug level, JVM variant and a JVMCI mode.
This is also a context manager that can be used with the 'with' statement to set/change
a VM configuration within a dynamic scope. For example:
with VM(debugLevel='fastdebug'):
dacapo(['pmd'])
"""
class VM:
def __init__(self, jvmVariant=None, debugLevel=None, jvmciMode=None):
self.update(jvmVariant, debugLevel, jvmciMode)
def update(self, jvmVariant=None, debugLevel=None, jvmciMode=None):
if jvmVariant in _legacyVms:
# Backwards compatibility for mx_jvmci:8 API
jvmVariant, newJvmciMode = _legacyVms[jvmVariant]
if jvmciMode is not None and jvmciMode != newJvmciMode:
mx.abort('JVM variant "' + jvmVariant + '" implies JVMCI mode "' + newJvmciMode +
'" which conflicts with explicitly specified JVMCI mode of "' + jvmciMode + '"')
jvmciMode = newJvmciMode
debugLevel = _translateLegacyDebugLevel(debugLevel)
assert jvmVariant is None or jvmVariant in _jdkJvmVariants, jvmVariant
assert debugLevel is None or debugLevel in _jdkDebugLevels, debugLevel
assert jvmciMode is None or jvmciMode in _jvmciModes, jvmciMode
self.jvmVariant = jvmVariant or _vm.jvmVariant
self.debugLevel = debugLevel or _vm.debugLevel
self.jvmciMode = jvmciMode or _vm.jvmciMode
def __enter__(self):
global _vm
self.previousVm = _vm
_vm = self
def __exit__(self, exc_type, exc_value, traceback):
global _vm
_vm = self.previousVm
_vm = VM(jvmVariant=_jdkJvmVariants[0], debugLevel=_jdkDebugLevels[0], jvmciMode='hosted')
def get_vm():
"""
Gets the configured VM.
"""
return _vm
def relativeVmLibDirInJdk():
mxos = mx.get_os()
if mxos == 'darwin':
return join('lib')
if mxos == 'windows' or mxos == 'cygwin':
return join('bin')
return join('lib', mx.get_arch())
def isJVMCIEnabled(vm):
assert vm in _jdkJvmVariants
return True
class JvmciJDKDeployedDist(object):
def __init__(self, name, compilers=False):
self._name = name
self._compilers = compilers
def dist(self):
return mx.distribution(self._name)
def deploy(self, jdkDir):
mx.nyi('deploy', self)
def post_parse_cmd_line(self):
self.set_archiveparticipant()
def set_archiveparticipant(self):
dist = self.dist()
dist.set_archiveparticipant(JVMCIArchiveParticipant(dist))
class ExtJDKDeployedDist(JvmciJDKDeployedDist):
def __init__(self, name):
JvmciJDKDeployedDist.__init__(self, name)
"""
The monolithic JVMCI distribution is deployed through use of -Xbootclasspath/p
so that it's not necessary to run JDK make after editing JVMCI sources.
The latter causes all JDK Java sources to be rebuilt since JVMCI is
(currently) in java.base.
"""
_monolithicJvmci = JvmciJDKDeployedDist('JVMCI')
"""
List of distributions that are deployed on the boot class path.
Note: In jvmci-8, they were deployed directly into the JDK directory.
"""
jdkDeployedDists = [_monolithicJvmci]
def _makehelp():
return subprocess.check_output([mx.gmake_cmd(), 'help'], cwd=_jdkSourceRoot)
def _runmake(args):
"""run the JDK make process
To build hotspot and import it into the JDK: "mx make hotspot import-hotspot"
{0}"""
jdkBuildDir = _get_jdk_build_dir()
if not exists(jdkBuildDir):
# JDK9 must be bootstrapped with a JDK8
compliance = mx.JavaCompliance('8')
jdk8 = mx.get_jdk(compliance.exactMatch, versionDescription=compliance.value)
cmd = ['sh', 'configure', '--with-debug-level=' + _vm.debugLevel, '--with-native-debug-symbols=none', '--disable-precompiled-headers',
'--with-jvm-variants=' + _vm.jvmVariant, '--disable-warnings-as-errors', '--with-boot-jdk=' + jdk8.home]
mx.run(cmd, cwd=_jdkSourceRoot)
cmd = [mx.gmake_cmd(), 'CONF=' + _vm.debugLevel]
if mx.get_opts().verbose:
cmd.append('LOG=debug')
cmd.extend(args)
if mx.get_opts().use_jdk_image and 'images' not in args:
cmd.append('images')
if not mx.get_opts().verbose:
mx.log('--------------- make execution ----------------------')
mx.log('Working directory: ' + _jdkSourceRoot)
mx.log('Command line: ' + ' '.join(cmd))
mx.log('-----------------------------------------------------')
mx.run(cmd, cwd=_jdkSourceRoot)
if 'images' in cmd:
jdkImageDir = join(jdkBuildDir, 'images', 'jdk')
# The OpenJDK build creates an empty cacerts file so copy one from
# the default JDK (which is assumed to be an OracleJDK)
srcCerts = join(mx.get_jdk(tag='default').home, 'jre', 'lib', 'security', 'cacerts')
dstCerts = join(jdkImageDir, 'lib', 'security', 'cacerts')
shutil.copyfile(srcCerts, dstCerts)
_create_jdk_bundle(jdkBuildDir, _vm.debugLevel, jdkImageDir)
def _get_jdk_bundle_arches():
"""
Gets a list of names that will be the part of a JDK bundle's file name denoting the architecture.
The first element in the list is the canonical name. Symlinks should be created for the
remaining names.
"""
cpu = mx.get_arch()
if cpu == 'amd64':
return ['x64', 'x86_64', 'amd64']
elif cpu == 'sparcv9':
return ['sparcv9']
mx.abort('Unsupported JDK bundle arch: ' + cpu)
def _create_jdk_bundle(jdkBuildDir, debugLevel, jdkImageDir):
"""
Creates a tar.gz JDK archive, an accompanying tar.gz.sha1 file with its
SHA1 signature plus symlinks to the archive for non-canonical architecture names.
"""
arches = _get_jdk_bundle_arches()
jdkTgzPath = join(_suite.get_output_root(), 'jdk-bundles', 'jdk9-{}-{}-{}.tar.gz'.format(debugLevel, _get_openjdk_os(), arches[0]))
with mx.Archiver(jdkTgzPath, kind='tgz') as arc:
mx.log('Creating ' + jdkTgzPath)
for root, _, filenames in os.walk(jdkImageDir):
for name in filenames:
f = join(root, name)
arcname = 'jdk1.9.0/' + os.path.relpath(f, jdkImageDir)
arc.zf.add(name=f, arcname=arcname, recursive=False)
with open(jdkTgzPath + '.sha1', 'w') as fp:
mx.log('Creating ' + jdkTgzPath + '.sha1')
fp.write(mx.sha1OfFile(jdkTgzPath))
def _create_link(source, link_name):
if exists(link_name):
os.remove(link_name)
mx.log('Creating ' + link_name + ' -> ' + source)
os.symlink(source, link_name)
for arch in arches[1:]:
link_name = join(_suite.get_output_root(), 'jdk-bundles', 'jdk9-{}-{}-{}.tar.gz'.format(debugLevel, _get_openjdk_os(), arch))
jdkTgzName = os.path.basename(jdkTgzPath)
_create_link(jdkTgzName, link_name)
_create_link(jdkTgzName + '.sha1', link_name + '.sha1')
def _runmultimake(args):
"""run the JDK make process for one or more configurations"""
jvmVariantsDefault = ','.join(_jdkJvmVariants)
debugLevelsDefault = ','.join(_jdkDebugLevels)
parser = ArgumentParser(prog='mx multimake')
parser.add_argument('--jdk-jvm-variants', '--vms', help='a comma separated list of VMs to build (default: ' + jvmVariantsDefault + ')', metavar='<args>', default=jvmVariantsDefault)
parser.add_argument('--jdk-debug-levels', '--builds', help='a comma separated list of JDK debug levels (default: ' + debugLevelsDefault + ')', metavar='<args>', default=debugLevelsDefault)
parser.add_argument('-n', '--no-check', action='store_true', help='omit running "java -version" after each build')
select = parser.add_mutually_exclusive_group()
select.add_argument('-c', '--console', action='store_true', help='send build output to console instead of log files')
select.add_argument('-d', '--output-dir', help='directory for log files instead of current working directory', default=os.getcwd(), metavar='<dir>')
args = parser.parse_args(args)
jvmVariants = args.jdk_jvm_variants.split(',')
debugLevels = [_translateLegacyDebugLevel(dl) for dl in args.jdk_debug_levels.split(',')]
allStart = time.time()
for jvmVariant in jvmVariants:
for debugLevel in debugLevels:
if not args.console:
logFile = join(mx.ensure_dir_exists(args.output_dir), jvmVariant + '-' + debugLevel + '.log')
log = open(logFile, 'wb')
start = time.time()
mx.log('BEGIN: ' + jvmVariant + '-' + debugLevel + '\t(see: ' + logFile + ')')
verbose = ['-v'] if mx.get_opts().verbose else []
# Run as subprocess so that output can be directed to a file
cmd = [sys.executable, '-u', mx.__file__] + verbose + ['--jdk-jvm-variant=' + jvmVariant, '--jdk-debug-level=' + debugLevel, 'make']
mx.logv("executing command: " + str(cmd))
subprocess.check_call(cmd, cwd=_suite.dir, stdout=log, stderr=subprocess.STDOUT)
duration = datetime.timedelta(seconds=time.time() - start)
mx.log('END: ' + jvmVariant + '-' + debugLevel + '\t[' + str(duration) + ']')
else:
with VM(jvmVariant=jvmVariant, debugLevel=debugLevel):
_runmake([])
if not args.no_check:
with VM(jvmciMode='jit'):
run_vm(['-XX:-BootstrapJVMCI', '-version'])
allDuration = datetime.timedelta(seconds=time.time() - allStart)
mx.log('TOTAL TIME: ' + '[' + str(allDuration) + ']')
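# Editor's usage sketch (values assumed from the defaults above):
#   mx multimake --jdk-jvm-variants server --jdk-debug-levels release,fastdebug
# builds every (variant, debug level) combination, writing build output to
# <variant>-<level>.log files unless --console is given, and, unless
# --no-check is given, verifies with 'java -version' in JVMCI 'jit' mode.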
class HotSpotProject(mx.NativeProject):
"""
Defines a NativeProject representing the HotSpot binaries built via make.
"""
def __init__(self, suite, name, deps, workingSets, **args):
assert name == 'hotspot'
mx.NativeProject.__init__(self, suite, name, "", [], deps, workingSets, None, None, join(suite.mxDir, name))
def eclipse_config_up_to_date(self, configZip):
# Assume that any change to this module might imply changes to the generated IDE files
if configZip.isOlderThan(__file__):
return False
for _, source in self._get_eclipse_settings_sources().iteritems():
if configZip.isOlderThan(source):
return False
return True
def _get_eclipse_settings_sources(self):
"""
Gets a dictionary from the name of an Eclipse settings file to
the file providing its generated content.
"""
if not hasattr(self, '_eclipse_settings'):
esdict = {}
templateSettingsDir = join(self.dir, 'templates', 'eclipse', 'settings')
if exists(templateSettingsDir):
for name in os.listdir(templateSettingsDir):
source = join(templateSettingsDir, name)
esdict[name] = source
self._eclipse_settings = esdict
return self._eclipse_settings
def _eclipseinit(self, files=None, libFiles=None):
"""
Generates an Eclipse project for each HotSpot build configuration.
"""
roots = [
'ASSEMBLY_EXCEPTION',
'LICENSE',
'README',
'THIRD_PARTY_README',
'agent',
'make',
'src',
'test'
]
for jvmVariant in _jdkJvmVariants:
for debugLevel in _jdkDebugLevels:
name = jvmVariant + '-' + debugLevel
eclProjectDir = join(self.dir, 'eclipse', name)
mx.ensure_dir_exists(eclProjectDir)
out = mx.XMLDoc()
out.open('projectDescription')
out.element('name', data='hotspot:' + name)
out.element('comment', data='')
out.element('projects', data='')
out.open('buildSpec')
out.open('buildCommand')
out.element('name', data='org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder')
out.element('triggers', data='full,incremental')
out.element('arguments', data='')
out.close('buildCommand')
out.close('buildSpec')
out.open('natures')
out.element('nature', data='org.eclipse.cdt.core.cnature')
out.element('nature', data='org.eclipse.cdt.core.ccnature')
out.element('nature', data='org.eclipse.cdt.managedbuilder.core.managedBuildNature')
out.element('nature', data='org.eclipse.cdt.managedbuilder.core.ScannerConfigNature')
out.close('natures')
if roots:
out.open('linkedResources')
for r in roots:
f = join(_suite.dir, r)
out.open('link')
out.element('name', data=r)
out.element('type', data='2' if isdir(f) else '1')
out.element('locationURI', data=mx.get_eclipse_project_rel_locationURI(f, eclProjectDir))
out.close('link')
out.open('link')
out.element('name', data='generated')
out.element('type', data='2')
generated = join(_get_hotspot_build_dir(jvmVariant, debugLevel), 'generated')
out.element('locationURI', data=mx.get_eclipse_project_rel_locationURI(generated, eclProjectDir))
out.close('link')
out.close('linkedResources')
out.close('projectDescription')
projectFile = join(eclProjectDir, '.project')
mx.update_file(projectFile, out.xml(indent='\t', newl='\n'))
if files:
files.append(projectFile)
cprojectTemplate = join(self.dir, 'templates', 'eclipse', 'cproject')
cprojectFile = join(eclProjectDir, '.cproject')
with open(cprojectTemplate) as f:
content = f.read()
mx.update_file(cprojectFile, content)
if files:
files.append(cprojectFile)
settingsDir = join(eclProjectDir, ".settings")
mx.ensure_dir_exists(settingsDir)
for name, source in self._get_eclipse_settings_sources().iteritems():
out = StringIO.StringIO()
print >> out, '# GENERATED -- DO NOT EDIT'
print >> out, '# Source:', source
with open(source) as f:
print >> out, f.read()
content = out.getvalue()
mx.update_file(join(settingsDir, name), content)
if files:
files.append(join(settingsDir, name))
def getBuildTask(self, args):
return JDKBuildTask(self, args, _vm.debugLevel, _vm.jvmVariant)
class JDKBuildTask(mx.NativeBuildTask):
def __init__(self, project, args, debugLevel, jvmVariant):
mx.NativeBuildTask.__init__(self, args, project)
self.jvmVariant = jvmVariant
self.debugLevel = debugLevel
def __str__(self):
return 'Building JDK[{}, {}]'.format(self.debugLevel, self.jvmVariant)
def build(self):
if mx.get_opts().use_jdk_image:
_runmake(['images'])
else:
_runmake([])
self._newestOutput = None
def clean(self, forBuild=False):
if forBuild: # Let make handle incremental builds
return
if exists(_get_jdk_build_dir(self.debugLevel)):
_runmake(['clean'])
self._newestOutput = None
# Backwards compatibility for mx_jvmci:8 API
def buildvms(args):
_runmultimake(args)
def run_vm(args, vm=None, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, debugLevel=None, vmbuild=None):
"""run a Java program by executing the java executable in a JVMCI JDK"""
jdkTag = mx.get_jdk_option().tag
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.abort('The "--jdk" option must have the tag "' + _JVMCI_JDK_TAG + '" when running a command requiring a JVMCI VM')
jdk = get_jvmci_jdk(debugLevel=debugLevel or _translateLegacyDebugLevel(vmbuild))
return jdk.run_java(args, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd, timeout=timeout)
def _unittest_vm_launcher(vmArgs, mainClass, mainClassArgs):
run_vm(vmArgs + [mainClass] + mainClassArgs)
mx_unittest.set_vm_launcher('JVMCI VM launcher', _unittest_vm_launcher)
def _jvmci_gate_runner(args, tasks):
# Build release server VM now so we can run the unit tests
with Task('BuildHotSpotJVMCIHosted: release', tasks) as t:
if t: _runmultimake(['--jdk-jvm-variants', 'server', '--jdk-debug-levels', 'release'])
# Run unit tests in hosted mode
with VM(jvmVariant='server', debugLevel='release', jvmciMode='hosted'):
with Task('JVMCI UnitTests: hosted-release', tasks) as t:
if t: unittest(['--suite', 'jvmci', '--enable-timing', '--verbose', '--fail-fast'])
# Build the other VM flavors
with Task('BuildHotSpotJVMCIOthers: fastdebug', tasks) as t:
if t: _runmultimake(['--jdk-jvm-variants', 'server', '--jdk-debug-levels', 'fastdebug'])
with Task('CleanAndBuildIdealGraphVisualizer', tasks, disableJacoco=True) as t:
if t and platform.processor() != 'sparc':
buildxml = mx._cygpathU2W(join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml'))
mx.run(['ant', '-f', buildxml, '-q', 'clean', 'build'], env=_igvBuildEnv())
mx_gate.add_gate_runner(_suite, _jvmci_gate_runner)
mx_gate.add_gate_argument('-g', '--only-build-jvmci', action='store_false', dest='buildNonJVMCI', help='only build the JVMCI VM')
def _igvJdk():
v8u20 = mx.VersionSpec("1.8.0_20")
v8u40 = mx.VersionSpec("1.8.0_40")
v8 = mx.VersionSpec("1.8")
def _igvJdkVersionCheck(version):
return version >= v8 and (version < v8u20 or version >= v8u40)
return mx.get_jdk(_igvJdkVersionCheck, versionDescription='>= 1.8 and < 1.8.0u20 or >= 1.8.0u40', purpose="building & running IGV").home
def _igvBuildEnv():
# When the http_proxy environment variable is set, convert it to the proxy settings that ant needs
env = dict(os.environ)
proxy = os.environ.get('http_proxy')
if proxy:
if '://' in proxy:
# Remove the http:// prefix (or any other protocol prefix)
proxy = proxy.split('://', 1)[1]
# Separate proxy server name and port number
proxyName, proxyPort = proxy.split(':', 1)
proxyEnv = '-DproxyHost="' + proxyName + '" -DproxyPort=' + proxyPort
env['ANT_OPTS'] = proxyEnv
env['JAVA_HOME'] = _igvJdk()
return env
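# Editor's note -- worked example of the proxy handling above: with
# http_proxy='http://proxy.example.com:3128' in the environment, the protocol
# prefix is stripped and the remainder split on ':', yielding
# ANT_OPTS='-DproxyHost="proxy.example.com" -DproxyPort=3128'.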
def igv(args):
"""run the Ideal Graph Visualizer"""
logFile = '.ideal_graph_visualizer.log'
with open(join(_suite.dir, logFile), 'w') as fp:
mx.logv('[Ideal Graph Visualizer log is in ' + fp.name + ']')
nbplatform = join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'nbplatform')
# Remove NetBeans platform if it is earlier than the current supported version
if exists(nbplatform):
updateTrackingFile = join(nbplatform, 'platform', 'update_tracking', 'org-netbeans-core.xml')
if not exists(updateTrackingFile):
mx.log('Could not find \'' + updateTrackingFile + '\', removing NetBeans platform')
shutil.rmtree(nbplatform)
else:
dom = xml.dom.minidom.parse(updateTrackingFile)
currentVersion = mx.VersionSpec(dom.getElementsByTagName('module_version')[0].getAttribute('specification_version'))
supportedVersion = mx.VersionSpec('3.43.1')
if currentVersion < supportedVersion:
mx.log('Replacing NetBeans platform version ' + str(currentVersion) + ' with version ' + str(supportedVersion))
shutil.rmtree(nbplatform)
elif supportedVersion < currentVersion:
mx.log('Supported NetBeans version in igv command should be updated to ' + str(currentVersion))
if not exists(nbplatform):
mx.logv('[This execution may take a while as the NetBeans platform needs to be downloaded]')
env = _igvBuildEnv()
# make the jar for Batik 1.7 available.
env['IGV_BATIK_JAR'] = mx.library('BATIK').get_path(True)
if mx.run(['ant', '-f', mx._cygpathU2W(join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml')), '-l', mx._cygpathU2W(fp.name), 'run'], env=env, nonZeroIsFatal=False):
mx.abort("IGV ant build & launch failed. Check '" + logFile + "'. You can also try to delete 'src/share/tools/IdealGraphVisualizer/nbplatform'.")
def c1visualizer(args):
"""run the Cl Compiler Visualizer"""
libpath = join(_suite.dir, 'lib')
if mx.get_os() == 'windows':
executable = join(libpath, 'c1visualizer', 'bin', 'c1visualizer.exe')
else:
executable = join(libpath, 'c1visualizer', 'bin', 'c1visualizer')
# Check whether the current C1Visualizer installation is up to date
if exists(executable) and not exists(mx.library('C1VISUALIZER_DIST').get_path(resolve=False)):
mx.log('Updating C1Visualizer')
shutil.rmtree(join(libpath, 'c1visualizer'))
archive = mx.library('C1VISUALIZER_DIST').get_path(resolve=True)
if not exists(executable):
zf = zipfile.ZipFile(archive, 'r')
zf.extractall(libpath)
if not exists(executable):
mx.abort('C1Visualizer binary does not exist: ' + executable)
if mx.get_os() != 'windows':
# Make sure that execution is allowed. The zip file does not always specify that correctly
os.chmod(executable, 0777)
mx.run([executable])
def hsdis(args, copyToDir=None):
"""download the hsdis library
This is needed to support HotSpot's assembly dumping features.
By default it downloads the Intel syntax version, use the 'att' argument to install AT&T syntax."""
flavor = 'intel'
if 'att' in args:
flavor = 'att'
if mx.get_arch() == "sparcv9":
flavor = "sparcv9"
lib = mx.add_lib_suffix('hsdis-' + mx.get_arch())
path = join(_suite.dir, 'lib', lib)
sha1s = {
'att/hsdis-amd64.dll' : 'bcbd535a9568b5075ab41e96205e26a2bac64f72',
'att/hsdis-amd64.so' : '58919ba085d4ef7a513f25bae75e7e54ee73c049',
'intel/hsdis-amd64.dll' : '6a388372cdd5fe905c1a26ced614334e405d1f30',
'intel/hsdis-amd64.so' : '844ed9ffed64fe9599638f29a8450c50140e3192',
'intel/hsdis-amd64.dylib' : 'fdb13ef0d7d23d93dacaae9c98837bea0d4fc5a2',
'sparcv9/hsdis-sparcv9.so': '970640a9af0bd63641f9063c11275b371a59ee60',
}
flavoredLib = flavor + "/" + lib
if flavoredLib not in sha1s:
mx.logv("hsdis not supported on this plattform or architecture")
return
if not exists(path):
sha1 = sha1s[flavoredLib]
sha1path = path + '.sha1'
mx.download_file_with_sha1('hsdis', path, ['https://lafo.ssw.uni-linz.ac.at/pub/hsdis/' + flavoredLib], sha1, sha1path, True, True, sources=False)
if copyToDir is not None and exists(copyToDir):
shutil.copy(path, copyToDir)
def hcfdis(args):
"""disassemble HexCodeFiles embedded in text files
Run a tool over the input files to convert all embedded HexCodeFiles
to a disassembled format."""
parser = ArgumentParser(prog='mx hcfdis')
parser.add_argument('-m', '--map', help='address to symbol map applied to disassembler output')
parser.add_argument('files', nargs=REMAINDER, metavar='files...')
args = parser.parse_args(args)
path = mx.library('HCFDIS').get_path(resolve=True)
mx.run_java(['-cp', path, 'com.oracle.max.hcfdis.HexCodeFileDis'] + args.files)
if args.map is not None:
addressRE = re.compile(r'0[xX]([A-Fa-f0-9]+)')
with open(args.map) as fp:
lines = fp.read().splitlines()
symbols = dict()
for l in lines:
addressAndSymbol = l.split(' ', 1)
if len(addressAndSymbol) == 2:
address, symbol = addressAndSymbol
if address.startswith('0x'):
address = long(address, 16)
symbols[address] = symbol
for f in args.files:
with open(f) as fp:
lines = fp.read().splitlines()
updated = False
for i in range(0, len(lines)):
l = lines[i]
for m in addressRE.finditer(l):
sval = m.group(0)
val = long(sval, 16)
sym = symbols.get(val)
if sym:
l = l.replace(sval, sym)
updated = True
lines[i] = l
if updated:
mx.log('updating ' + f)
with open('new_' + f, "w") as fp:
for l in lines:
print >> fp, l
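# Editor's note: the --map file above is assumed to hold one
# "<address> <symbol>" pair per line, e.g.
#   0x7f2b4c001000 StubRoutines::call_stub
# Hexadecimal addresses found in the input files are then replaced by their
# symbols, and updated copies are written out as 'new_<file>'.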
def jol(args):
"""Java Object Layout"""
joljar = mx.library('JOL_INTERNALS').get_path(resolve=True)
candidates = mx.findclass(args, logToConsole=False, matcher=lambda s, classname: s == classname or classname.endswith('.' + s) or classname.endswith('$' + s))
if len(candidates) > 0:
candidates = mx.select_items(sorted(candidates))
else:
# mx.findclass can be mistaken, don't give up yet
candidates = args
run_vm(['-javaagent:' + joljar, '-cp', os.pathsep.join([mx.classpath(), joljar]), "org.openjdk.jol.MainObjectInternals"] + candidates)
class JVMCIArchiveParticipant:
def __init__(self, dist):
self.dist = dist
def __opened__(self, arc, srcArc, services):
self.services = services
self.jvmciServices = services
self.arc = arc
def __add__(self, arcname, contents):
return False
def __addsrc__(self, arcname, contents):
return False
def __closing__(self):
pass
def _get_openjdk_os():
# See: common/autoconf/platform.m4
os = mx.get_os()
if 'darwin' in os:
os = 'macosx'
elif 'linux' in os:
os = 'linux'
elif 'solaris' in os:
os = 'solaris'
elif 'cygwin' in os or 'mingw' in os:
os = 'windows'
return os
def _get_openjdk_cpu():
cpu = mx.get_arch()
if cpu == 'amd64':
cpu = 'x86_64'
elif cpu == 'sparcv9':
cpu = 'sparcv9'
return cpu
def _get_openjdk_os_cpu():
return _get_openjdk_os() + '-' + _get_openjdk_cpu()
def _get_jdk_build_dir(debugLevel=None):
"""
Gets the directory into which the JDK is built. This directory contains
the exploded JDK under jdk/ and the JDK image under images/jdk/.
"""
if debugLevel is None:
debugLevel = _vm.debugLevel
name = '{}-{}-{}-{}'.format(_get_openjdk_os_cpu(), 'normal', _vm.jvmVariant, debugLevel)
return join(dirname(_suite.dir), 'build', name)
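# Editor's note -- example of the directory name built above (assumed values):
# on linux/amd64 with the default VM configuration this yields
# <JDK_REPO_ROOT>/build/linux-x86_64-normal-server-release.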
_jvmci_bootclasspath_prepends = []
def _get_hotspot_build_dir(jvmVariant=None, debugLevel=None):
"""
Gets the directory in which a particular HotSpot configuration is built
(e.g., <JDK_REPO_ROOT>/build/macosx-x86_64-normal-server-release/hotspot/bsd_amd64_compiler2)
"""
if jvmVariant is None:
jvmVariant = _vm.jvmVariant
os = mx.get_os()
if os == 'darwin':
os = 'bsd'
arch = mx.get_arch()
buildname = {'client': 'compiler1', 'server': 'compiler2'}.get(jvmVariant, jvmVariant)
name = '{}_{}_{}'.format(os, arch, buildname)
return join(_get_jdk_build_dir(debugLevel=debugLevel), 'hotspot', name)
def add_bootclasspath_prepend(dep):
assert isinstance(dep, mx.ClasspathDependency)
_jvmci_bootclasspath_prepends.append(dep)
class JVMCI9JDKConfig(mx.JDKConfig):
def __init__(self, debugLevel):
self.debugLevel = debugLevel
jdkBuildDir = _get_jdk_build_dir(debugLevel)
jdkDir = join(jdkBuildDir, 'images', 'jdk') if mx.get_opts().use_jdk_image else join(jdkBuildDir, 'jdk')
mx.JDKConfig.__init__(self, jdkDir, tag=_JVMCI_JDK_TAG)
def parseVmArgs(self, args, addDefaultArgs=True):
args = mx.expand_project_in_args(args, insitu=False)
jacocoArgs = mx_gate.get_jacoco_agent_args()
if jacocoArgs:
args = jacocoArgs + args
args = ['-Xbootclasspath/p:' + dep.classpath_repr() for dep in _jvmci_bootclasspath_prepends] + args
# Remove JVMCI jars from class path. They are only necessary when
# compiling with a javac from JDK8 or earlier.
cpIndex, cp = mx.find_classpath_arg(args)
if cp:
excluded = frozenset([dist.path for dist in _suite.dists])
cp = os.pathsep.join([e for e in cp.split(os.pathsep) if e not in excluded])
args[cpIndex] = cp
jvmciModeArgs = _jvmciModes[_vm.jvmciMode]
if jvmciModeArgs:
bcpDeps = [jdkDist.dist() for jdkDist in jdkDeployedDists]
if bcpDeps:
args = ['-Xbootclasspath/p:' + os.pathsep.join([d.classpath_repr() for d in bcpDeps])] + args
# Set the default JVMCI compiler
for jdkDist in reversed(jdkDeployedDists):
assert isinstance(jdkDist, JvmciJDKDeployedDist), jdkDist
if jdkDist._compilers:
jvmciCompiler = jdkDist._compilers[-1]
args = ['-Djvmci.compiler=' + jvmciCompiler] + args
break
if '-version' in args:
ignoredArgs = args[args.index('-version') + 1:]
if len(ignoredArgs) > 0:
mx.log("Warning: The following options will be ignored by the vm because they come after the '-version' argument: " + ' '.join(ignoredArgs))
return self.processArgs(args, addDefaultArgs=addDefaultArgs)
# Overrides JDKConfig
def run_java(self, args, vm=None, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, env=None, addDefaultArgs=True):
if vm is None:
vm = 'server'
args = self.parseVmArgs(args, addDefaultArgs=addDefaultArgs)
jvmciModeArgs = _jvmciModes[_vm.jvmciMode]
cmd = [self.java] + ['-' + vm] + jvmciModeArgs + args
return mx.run(cmd, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd)
"""
The dict of JVMCI JDKs indexed by debug-level names.
"""
_jvmci_jdks = {}
def get_jvmci_jdk(debugLevel=None):
"""
Gets the JVMCI JDK corresponding to 'debugLevel'.
"""
if not debugLevel:
debugLevel = _vm.debugLevel
jdk = _jvmci_jdks.get(debugLevel)
if jdk is None:
try:
jdk = JVMCI9JDKConfig(debugLevel)
except mx.JDKConfigException as e:
jdkBuildDir = _get_jdk_build_dir(debugLevel)
msg = 'Error with the JDK built into {}:\n{}\nTry (re)building it with: mx --jdk-debug-level={} make'
if mx.get_opts().use_jdk_image:
msg += ' images'
mx.abort(msg.format(jdkBuildDir, e.message, debugLevel))
_jvmci_jdks[debugLevel] = jdk
return jdk
class JVMCI9JDKFactory(mx.JDKFactory):
def getJDKConfig(self):
jdk = get_jvmci_jdk(_vm.debugLevel)
return jdk
def description(self):
return "JVMCI JDK"
mx.update_commands(_suite, {
'make': [_runmake, '[args...]', _makehelp],
'multimake': [_runmultimake, '[options]'],
'c1visualizer' : [c1visualizer, ''],
'hsdis': [hsdis, '[att]'],
'hcfdis': [hcfdis, ''],
'igv' : [igv, ''],
'jol' : [jol, ''],
'vm': [run_vm, '[-options] class [args...]'],
})
mx.add_argument('-M', '--jvmci-mode', action='store', choices=sorted(_jvmciModes.viewkeys()), help='the JVMCI mode to build/run (default: ' + _vm.jvmciMode + ')')
mx.add_argument('--jdk-jvm-variant', '--vm', action='store', choices=_jdkJvmVariants + sorted(_legacyVms.viewkeys()), help='the JVM variant type to build/run (default: ' + _vm.jvmVariant + ')')
mx.add_argument('--jdk-debug-level', '--vmbuild', action='store', choices=_jdkDebugLevels + sorted(_legacyVmbuilds.viewkeys()), help='the JDK debug level to build/run (default: ' + _vm.debugLevel + ')')
mx.add_argument('-I', '--use-jdk-image', action='store_true', help='build/run JDK image instead of exploded JDK')
mx.addJDKFactory(_JVMCI_JDK_TAG, mx.JavaCompliance('9'), JVMCI9JDKFactory())
def mx_post_parse_cmd_line(opts):
mx.set_java_command_default_jdk_tag(_JVMCI_JDK_TAG)
jdkTag = mx.get_jdk_option().tag
jvmVariant = None
debugLevel = None
jvmciMode = None
if opts.jdk_jvm_variant is not None:
jvmVariant = opts.jdk_jvm_variant
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.warn('Ignoring "--jdk-jvm-variant" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')
if opts.jdk_debug_level is not None:
debugLevel = _translateLegacyDebugLevel(opts.jdk_debug_level)
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.warn('Ignoring "--jdk-debug-level" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')
if opts.jvmci_mode is not None:
jvmciMode = opts.jvmci_mode
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.warn('Ignoring "--jvmci-mode" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')
_vm.update(jvmVariant, debugLevel, jvmciMode)
for jdkDist in jdkDeployedDists:
jdkDist.post_parse_cmd_line()
def _update_JDK9_STUBS_library():
"""
Sets the "path" and "sha1" attributes of the "JDK9_STUBS" library.
"""
jdk9InternalLib = _suite.suiteDict['libraries']['JDK9_STUBS']
jarInputDir = join(_suite.get_output_root(), 'jdk9-stubs')
jarPath = join(_suite.get_output_root(), 'jdk9-stubs.jar')
stubs = [
('jdk.internal.misc', 'VM', """package jdk.internal.misc;
public class VM {
public static String getSavedProperty(String key) {
throw new InternalError("should not reach here");
}
}
""")
]
if not exists(jarPath):
sourceFiles = []
for (package, className, source) in stubs:
sourceFile = join(jarInputDir, package.replace('.', os.sep), className + '.java')
mx.ensure_dir_exists(os.path.dirname(sourceFile))
with open(sourceFile, 'w') as fp:
fp.write(source)
sourceFiles.append(sourceFile)
jdk = mx.get_jdk(tag='default')
mx.run([jdk.javac, '-d', jarInputDir] + sourceFiles)
mx.run([jdk.jar, 'cf', jarPath, '.'], cwd=jarInputDir)
jdk9InternalLib['path'] = jarPath
jdk9InternalLib['sha1'] = mx.sha1OfFile(jarPath)
_update_JDK9_STUBS_library()
|
mohlerm/hotspot_cached_profiles
|
.mx.jvmci/mx_jvmci.py
|
Python
|
gpl-2.0
| 37,568
|
[
"VisIt"
] |
6f8d922f658ac936b54b8a53d1cc63e9930717f41a9fa764108aa8d6296cf45a
|
# (C) 2015, 2016 Elke Schaper @ Vital-IT, Swiss Institute of Bioinformatics
"""
.. moduleauthor:: Elke Schaper <elke.schaper@sib.swiss>
"""
import logging
import GPy
import numpy as np
from hts.data_tasks.gaussian_processes import create_gaussian_process_composite_kernel
LOG = logging.getLogger(__name__)
def cross_validate_predictions(x, y, prediction_method, p=1, **kwargs):
""" Calculate the prediction error of `prediction_method` on a dataset with input x and output y
with cross validation.
Args:
x (np.array): The input.
y (np.array): The output/signal.
prediction_method (method): The prediction method.
p (int): The cross-validation chunk size. E.g., p=1 is leave-one-out cross-validation.
"""
assert len(x) == len(y)
if p != 1:
raise Exception("p != 1 is not implemented; only leave-one-out cross-validation is possible.")
error = []
for i in range(len(x)):
y_predict = prediction_method(x=np.delete(x, i, axis=0), y=np.delete(y, i, axis=0), x_predict=x[i:i+1], **kwargs)
error.append((y[i:i+1] - y_predict).sum())
return error
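# Illustrative usage (commented out; `mean_predictor` is a hypothetical helper,
# not part of this module): leave-one-out errors for a predictor that ignores
# x and always predicts the training mean.
#
# def mean_predictor(x, y, x_predict, **kwargs):
#     return np.full((len(x_predict), y.shape[1]), y.mean())
#
# x = np.arange(6, dtype=float).reshape(-1, 1)
# y = x ** 2
# errors = cross_validate_predictions(x, y, mean_predictor)  # 6 error values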
def predict_with_gaussian_process(x, y, x_predict, kernel_kwargs=None, optimize_kwargs=None, **kwargs):
""" Train a Gaussian process model with training data `x`, `y`. Predict values for `x_predict`.
Args:
x (np.array): The input data for training.
y (np.array): The output data for training.
x_predict (np.array): The input data for prediction.
kernel_kwargs (dict): E.g. {"StdPeriodic": {"wavelengths": ("fix", 12)}}
optimize_kwargs (dict): E.g. {"optimizer": "bfgs", "max_f_eval": 1000}
"""
if kernel_kwargs is None:
kernel_kwargs = {}
if optimize_kwargs is None:
optimize_kwargs = {}
assert len(x) == len(y)
kernel = create_gaussian_process_composite_kernel(input_dim=x.shape[1], kernel=None, kernels=kernel_kwargs)
m = GPy.models.GPRegression(x, y, kernel)
LOG.info(m)
m.optimize(**optimize_kwargs)
y_predicted_mean, y_predicted_var = m.predict(x_predict)
return y_predicted_mean
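# Usage sketch (commented out; assumes GPy is installed and that the kwargs
# shown in the docstring are accepted by the composite-kernel factory):
#
# x = np.linspace(0, 10, 20).reshape(-1, 1)
# y = np.sin(x)
# y_hat = predict_with_gaussian_process(
#     x, y, x_predict=np.array([[5.0]]),
#     optimize_kwargs={"optimizer": "bfgs", "max_f_eval": 1000})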
|
elkeschaper/hts
|
hts/data_tasks/prediction.py
|
Python
|
gpl-2.0
| 2,141
|
[
"Gaussian"
] |
037492f23bd049354e37969081655855856f9dc3bfbe7480a7cead7f499717b7
|
#!/usr/bin/env python
#
# Wrapper script for invoking the jar.
#
# This script is written for use with the Conda package manager and is ported
# from a bash script that does the same thing, adapting the style in
# the peptide-shaker wrapper
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
import subprocess
import sys
import os
from os import access, getenv, path, X_OK
# Expected name of the wdltool JAR file.
JAR_NAME = 'wdltool.jar'
# Default options passed to the `java` executable.
DEFAULT_JVM_MEM_OPTS = ['-Xms512m', '-Xmx1g']
def real_dirname(in_path):
"""Returns the symlink-resolved, canonicalized directory-portion of
the given path."""
return path.dirname(path.realpath(in_path))
def java_executable():
"""Returns the name of the Java executable."""
java_home = getenv('JAVA_HOME')
java_bin = path.join('bin', 'java')
env_prefix = os.path.dirname(os.path.dirname(real_dirname(sys.argv[0])))
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
# Use Java installed with Anaconda to ensure correct version
return os.path.join(env_prefix, 'bin', 'java')
def jvm_opts(argv, default_mem_opts=DEFAULT_JVM_MEM_OPTS):
"""Constructs a list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts, prop_opts, pass_args = [], [], []
for arg in argv:
if arg.startswith('-D') or arg.startswith('-XX'):
opts_list = prop_opts
elif arg.startswith('-Xm'):
opts_list = mem_opts
else:
opts_list = pass_args
opts_list.append(arg)
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_mem_opts
return (mem_opts, prop_opts, pass_args)
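# Illustrative example (hypothetical arguments): a mixed argument list is
# split into the three lists described in the docstring.
#
# jvm_opts(['-Xmx2g', '-Dkey=value', 'run', 'workflow.wdl'])
# -> (['-Xmx2g'], ['-Dkey=value'], ['run', 'workflow.wdl'])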
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('org'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = path.join(jar_dir, JAR_NAME)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == "__main__":
main()
|
colinbrislawn/bioconda-recipes
|
recipes/wdltool/wdltool.py
|
Python
|
mit
| 2,506
|
[
"Bioconda"
] |
ebd951347711fb5e4f1ade1bfbf650301463db66d36a06e5d8b068e95e202bc9
|
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
import pep8
"""
Guidelines for writing new hacking checks
- Use only for Nova specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to nova/tests/unit/test_hacking.py
"""
UNDERSCORE_IMPORT_FILES = []
session_check = re.compile(r"\w*def [a-zA-Z0-9].*[(].*session.*[)]")
cfg_re = re.compile(r".*\scfg\.")
vi_header_re = re.compile(r"^#\s+vim?:.+")
virt_file_re = re.compile(r"\./nova/(?:tests/)?virt/(\w+)/")
virt_import_re = re.compile(
r"^\s*(?:import|from) nova\.(?:tests\.)?virt\.(\w+)")
virt_config_re = re.compile(
r"CONF\.import_opt\('.*?', 'nova\.virt\.(\w+)('|.)")
author_tag_re = (re.compile("^\s*#\s*@?(a|A)uthor:"),
re.compile("^\.\.\s+moduleauthor::"))
asse_trueinst_re = re.compile(
r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
"(\w|\.|\'|\"|\[|\])+\)\)")
asse_equal_type_re = re.compile(
r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
"(\w|\.|\'|\"|\[|\])+\)")
asse_equal_in_end_with_true_or_false_re = re.compile(r"assertEqual\("
r"(\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)")
asse_equal_in_start_with_true_or_false_re = re.compile(r"assertEqual\("
r"(True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)")
asse_equal_end_with_none_re = re.compile(
r"assertEqual\(.*?,\s+None\)$")
asse_equal_start_with_none_re = re.compile(
r"assertEqual\(None,")
# NOTE(snikitin): The next two regexes are kept separate for readability.
# asse_true_false_with_in_or_not_in checks assertTrue/False(A in B)
# cases where the B argument contains no spaces.
# asse_true_false_with_in_or_not_in_spaces checks cases where the B
# argument contains spaces and starts/ends with [, ', ".
# For example: [1, 2, 3], "some string", 'another string'.
# Keeping the regexes separate avoids false positives: the B argument may
# contain spaces only if it starts with [, ", '. Otherwise a string such as
# "assertFalse(A in B and C in D)" would match incorrectly, with the B
# argument parsed as "B and C in D".
asse_true_false_with_in_or_not_in = re.compile(r"assert(True|False)\("
r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)")
asse_true_false_with_in_or_not_in_spaces = re.compile(r"assert(True|False)"
r"\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+"
r"[\[|'|\"](, .*)?\)")
asse_raises_regexp = re.compile(r"assertRaisesRegexp\(")
conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w")
log_translation = re.compile(
r"(.)*LOG\.(audit|error|critical)\(\s*('|\")")
log_translation_info = re.compile(
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_exception = re.compile(
r"(.)*LOG\.(exception)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
translated_log = re.compile(
r"(.)*LOG\.(audit|error|info|critical|exception)"
"\(\s*_\(\s*('|\")")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
import_translation_for_log_or_exception = re.compile(
r"(.)*(from\snova.i18n\simport)\s_")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
api_version_re = re.compile(r"@.*api_version")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
decorator_re = re.compile(r"@.*")
# TODO(dims): When other oslo libraries switch over non-namespace'd
# imports, we need to add them to the regexp below.
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.]"
r"(concurrency|config|db|i18n|messaging|"
r"middleware|serialization|utils|vmware)")
oslo_namespace_imports_2 = re.compile(r"from[\s]*oslo[\s]*import[\s]*"
r"(concurrency|config|db|i18n|messaging|"
r"middleware|serialization|utils|vmware)")
oslo_namespace_imports_3 = re.compile(r"import[\s]*oslo\."
r"(concurrency|config|db|i18n|messaging|"
r"middleware|serialization|utils|vmware)")
class BaseASTChecker(ast.NodeVisitor):
"""Provides a simple framework for writing AST-based checks.
Subclasses should implement visit_* methods like any other AST visitor
implementation. When they detect an error for a particular node the
method should call ``self.add_error(offending_node)``. Details about
where in the code the error occurred will be pulled from the node
object.
Subclasses should also provide a class variable named CHECK_DESC to
be used for the human readable error message.
"""
def __init__(self, tree, filename):
"""This object is created automatically by pep8.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks)
"""
self._tree = tree
self._errors = []
def run(self):
"""Called automatically by pep8."""
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
"""Add an error caused by a node to the list of errors for pep8."""
message = message or self.CHECK_DESC
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
def _check_call_names(self, call_node, names):
if isinstance(call_node, ast.Call):
if isinstance(call_node.func, ast.Name):
if call_node.func.id in names:
return True
return False
def import_no_db_in_virt(logical_line, filename):
"""Check for db calls from nova/virt
As of grizzly-2 all the database calls have been removed from
nova/virt, and we want to keep it that way.
N307
"""
if "nova/virt" in filename and not filename.endswith("fake.py"):
if logical_line.startswith("from nova import db"):
yield (0, "N307: nova.db import not allowed in nova/virt/*")
def no_db_session_in_public_api(logical_line, filename):
if "db/api.py" in filename:
if session_check.match(logical_line):
yield (0, "N309: public db api methods may not accept session")
def use_timeutils_utcnow(logical_line, filename):
# tools are OK to use the standard datetime module
if "/tools/" in filename:
return
msg = "N310: timeutils.utcnow() must be used instead of datetime.%s()"
datetime_funcs = ['now', 'utcnow']
for f in datetime_funcs:
pos = logical_line.find('datetime.%s' % f)
if pos != -1:
yield (pos, msg % f)
def _get_virt_name(regex, data):
m = regex.match(data)
if m is None:
return None
driver = m.group(1)
# Ignore things we mis-detect as virt drivers in the regex
if driver in ["test_virt_drivers", "driver", "firewall",
"disk", "api", "imagecache", "cpu", "hardware"]:
return None
# TODO(berrange): remove once bugs 1261826 and 126182 are
# fixed, or baremetal driver is removed, which is first.
if driver == "baremetal":
return None
return driver
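# Illustrative example: _get_virt_name(virt_file_re, "./nova/virt/libvirt/driver.py")
# returns "libvirt", while names on the ignore list above return None.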
def import_no_virt_driver_import_deps(physical_line, filename):
"""Check virt drivers' modules aren't imported by other drivers
Modules under each virt driver's directory are
considered private to that virt driver. Other drivers
in Nova must not access those drivers. Any code that
is to be shared should be refactored into a common
module
N311
"""
thisdriver = _get_virt_name(virt_file_re, filename)
thatdriver = _get_virt_name(virt_import_re, physical_line)
if (thatdriver is not None and
thisdriver is not None and
thisdriver != thatdriver):
return (0, "N311: importing code from other virt drivers forbidden")
def import_no_virt_driver_config_deps(physical_line, filename):
"""Check virt drivers' config vars aren't used by other drivers
Modules under each virt driver's directory are
considered private to that virt driver. Other drivers
in Nova must not use their config vars. Any config vars
that are to be shared should be moved into a common module
N312
"""
thisdriver = _get_virt_name(virt_file_re, filename)
thatdriver = _get_virt_name(virt_config_re, physical_line)
if (thatdriver is not None and
thisdriver is not None and
thisdriver != thatdriver):
return (0, "N312: using config vars from other virt drivers forbidden")
def capital_cfg_help(logical_line, tokens):
msg = "N313: capitalize help string"
if cfg_re.match(logical_line):
for t in range(len(tokens)):
if tokens[t][1] == "help":
txt = tokens[t + 2][1]
if len(txt) > 1 and txt[1].islower():
yield(0, msg)
def no_vi_headers(physical_line, line_number, lines):
"""Check for vi editor configuration in source files.
By default vi modelines can only appear in the first or
last 5 lines of a source file.
N314
"""
# NOTE(gilliard): line_number is 1-indexed
if line_number <= 5 or line_number > len(lines) - 5:
if vi_header_re.match(physical_line):
return 0, "N314: Don't put vi configuration in source files"
def assert_true_instance(logical_line):
"""Check for assertTrue(isinstance(a, b)) sentences
N316
"""
if asse_trueinst_re.match(logical_line):
yield (0, "N316: assertTrue(isinstance(a, b)) sentences not allowed")
def assert_equal_type(logical_line):
"""Check for assertEqual(type(A), B) sentences
N317
"""
if asse_equal_type_re.match(logical_line):
yield (0, "N317: assertEqual(type(A), B) sentences not allowed")
def assert_equal_none(logical_line):
"""Check for assertEqual(A, None) or assertEqual(None, A) sentences
N318
"""
res = (asse_equal_start_with_none_re.search(logical_line) or
asse_equal_end_with_none_re.search(logical_line))
if res:
yield (0, "N318: assertEqual(A, None) or assertEqual(None, A) "
"sentences not allowed")
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
* Use filename so we can start enforcing this in specific folders instead
of needing to do so all at once.
N319
"""
if logical_line.startswith("LOG.debug(_("):
yield(0, "N319 Don't translate debug level logs")
def no_import_translation_in_tests(logical_line, filename):
"""Check for 'from nova.i18n import _'
N337
"""
if 'nova/tests/' in filename:
res = import_translation_for_log_or_exception.match(logical_line)
if res:
yield(0, "N337 Don't import translation in tests")
def no_setting_conf_directly_in_tests(logical_line, filename):
"""Check for setting CONF.* attributes directly in tests
The value can leak out of tests affecting how subsequent tests run.
Using self.flags(option=value) is the preferred method to temporarily
set config options in tests.
N320
"""
if 'nova/tests/' in filename:
res = conf_attribute_set_re.match(logical_line)
if res:
yield (0, "N320: Setting CONF.* attributes directly in tests is "
"forbidden. Use self.flags(option=value) instead")
def validate_log_translations(logical_line, physical_line, filename):
# Translations are not required in the test directory
# and the Xen utilities
if ("nova/tests" in filename or
"plugins/xenserver/xenapi/etc/xapi.d" in filename or
# TODO(Mike_D): Needs to be removed with:
# I075ab2a522272f2082c292dfedc877abd8ebe328
"nova/virt/libvirt" in filename):
return
if pep8.noqa(physical_line):
return
msg = "N328: LOG.info messages require translations `_LI()`!"
if log_translation_info.match(logical_line):
yield (0, msg)
msg = "N329: LOG.exception messages require translations `_LE()`!"
if log_translation_exception.match(logical_line):
yield (0, msg)
msg = "N330: LOG.warning, LOG.warn messages require translations `_LW()`!"
if log_translation_LW.match(logical_line):
yield (0, msg)
msg = "N321: Log messages require translations!"
if log_translation.match(logical_line):
yield (0, msg)
def no_mutable_default_args(logical_line):
msg = "N322: Method's default argument shouldn't be mutable!"
if mutable_default_args.match(logical_line):
yield (0, msg)
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate logs are explicitly importing the _ function. We
can't trust unit tests to catch a missing import, so we need to
check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
if filename in UNDERSCORE_IMPORT_FILES:
pass
elif (underscore_import_check.match(logical_line) or
custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif (translated_log.match(logical_line) or
string_translation.match(logical_line)):
yield(0, "N323: Found use of _() without explicit import of _ !")
def use_jsonutils(logical_line, filename):
# the code below that path is not meant to be executed from the nova
# tree where the jsonutils module is present, so don't enforce its usage
# for this subdirectory
if "plugins/xenserver" in filename:
return
# tools are OK to use the standard json module
if "/tools/" in filename:
return
msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s"
if "json." in logical_line:
json_funcs = ['dumps(', 'dump(', 'loads(', 'load(']
for f in json_funcs:
pos = logical_line.find('json.%s' % f)
if pos != -1:
yield (pos, msg % {'fun': f[:-1]})
def check_api_version_decorator(logical_line, previous_logical, blank_before,
filename):
msg = ("N332: the api_version decorator must be the first decorator"
" on a method.")
if blank_before == 0 and re.match(api_version_re, logical_line) \
and re.match(decorator_re, previous_logical):
yield(0, msg)
class CheckForStrUnicodeExc(BaseASTChecker):
"""Checks for the use of str() or unicode() on an exception.
This currently only handles the case where str() or unicode()
is used in the scope of an exception handler. If the exception
is passed into a function, returned from an assertRaises, or
used on an exception created in the same scope, this does not
catch it.
"""
CHECK_DESC = ('N325 str() and unicode() cannot be used on an '
'exception. Remove or use six.text_type()')
def __init__(self, tree, filename):
super(CheckForStrUnicodeExc, self).__init__(tree, filename)
self.name = []
self.already_checked = []
def visit_TryExcept(self, node):
for handler in node.handlers:
if handler.name:
self.name.append(handler.name.id)
super(CheckForStrUnicodeExc, self).generic_visit(node)
self.name = self.name[:-1]
else:
super(CheckForStrUnicodeExc, self).generic_visit(node)
def visit_Call(self, node):
if self._check_call_names(node, ['str', 'unicode']):
if node not in self.already_checked:
self.already_checked.append(node)
if isinstance(node.args[0], ast.Name):
if node.args[0].id in self.name:
self.add_error(node.args[0])
super(CheckForStrUnicodeExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
"""Checks for the use of concatenation on a translated string.
Translations should not be concatenated with other strings, but
should instead include the string being added to the translated
string to give the translators the most information.
"""
CHECK_DESC = ('N326 Translated messages cannot be concatenated. '
'String should be included in translated message.')
TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']
def visit_BinOp(self, node):
if isinstance(node.op, ast.Add):
if self._check_call_names(node.left, self.TRANS_FUNC):
self.add_error(node.left)
elif self._check_call_names(node.right, self.TRANS_FUNC):
self.add_error(node.right)
super(CheckForTransAdd, self).generic_visit(node)
def check_oslo_namespace_imports(logical_line, blank_before, filename):
if re.match(oslo_namespace_imports, logical_line):
msg = ("N333: '%s' must be used instead of '%s'.") % (
logical_line.replace('oslo.', 'oslo_'),
logical_line)
yield(0, msg)
match = re.match(oslo_namespace_imports_2, logical_line)
if match:
msg = ("N333: 'module %s should not be imported "
"from oslo namespace.") % match.group(1)
yield(0, msg)
match = re.match(oslo_namespace_imports_3, logical_line)
if match:
msg = ("N333: 'module %s should not be imported "
"from oslo namespace.") % match.group(1)
yield(0, msg)
def assert_true_or_false_with_in(logical_line):
"""Check for assertTrue/False(A in B), assertTrue/False(A not in B),
assertTrue/False(A in B, message) or assertTrue/False(A not in B, message)
sentences.
N334
"""
res = (asse_true_false_with_in_or_not_in.search(logical_line) or
asse_true_false_with_in_or_not_in_spaces.search(logical_line))
if res:
yield (0, "N334: Use assertIn/NotIn(A, B) rather than "
"assertTrue/False(A in/not in B) when checking collection "
"contents.")
def assert_raises_regexp(logical_line):
"""Check for usage of deprecated assertRaisesRegexp
N335
"""
res = asse_raises_regexp.search(logical_line)
if res:
yield (0, "N335: assertRaisesRegex must be used instead "
"of assertRaisesRegexp")
def dict_constructor_with_list_copy(logical_line):
msg = ("N336: Must use a dict comprehension instead of a dict constructor"
" with a sequence of key-value pairs."
)
if dict_constructor_with_list_copy_re.match(logical_line):
yield (0, msg)
def assert_equal_in(logical_line):
"""Check for assertEqual(A in B, True), assertEqual(True, A in B),
assertEqual(A in B, False) or assertEqual(False, A in B) sentences
N338
"""
res = (asse_equal_in_start_with_true_or_false_re.search(logical_line) or
asse_equal_in_end_with_true_or_false_re.search(logical_line))
if res:
yield (0, "N338: Use assertIn/NotIn(A, B) rather than "
"assertEqual(A in B, True/False) when checking collection "
"contents.")
def factory(register):
register(import_no_db_in_virt)
register(no_db_session_in_public_api)
register(use_timeutils_utcnow)
register(import_no_virt_driver_import_deps)
register(import_no_virt_driver_config_deps)
register(capital_cfg_help)
register(no_vi_headers)
register(no_import_translation_in_tests)
register(assert_true_instance)
register(assert_equal_type)
register(assert_equal_none)
register(assert_raises_regexp)
register(no_translate_debug_logs)
register(no_setting_conf_directly_in_tests)
register(validate_log_translations)
register(no_mutable_default_args)
register(check_explicit_underscore_import)
register(use_jsonutils)
register(check_api_version_decorator)
register(CheckForStrUnicodeExc)
register(CheckForTransAdd)
register(check_oslo_namespace_imports)
register(assert_true_or_false_with_in)
register(dict_constructor_with_list_copy)
register(assert_equal_in)
|
sajeeshcs/nested_quota_final
|
nova/hacking/checks.py
|
Python
|
apache-2.0
| 21,797
|
[
"VisIt"
] |
fa2b765f28596b5d8de596d82cee608a692982f6cf3ffd17ff90aadaddf9ea98
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Grass(AutotoolsPackage):
"""GRASS GIS (Geographic Resources Analysis Support System), is a free
and open source Geographic Information System (GIS) software suite
used for geospatial data management and analysis, image processing,
graphics and maps production, spatial modeling, and visualization."""
homepage = "https://grass.osgeo.org"
url = "https://grass.osgeo.org/grass78/source/grass-7.8.2.tar.gz"
list_url = "https://grass.osgeo.org/download/software/sources/"
git = "https://github.com/OSGeo/grass.git"
maintainers = ['adamjstewart']
version('master', branch='master')
version('7.8.2', sha256='33576f7078f805b39ca20c2fa416ac79c64260c0581072a6dc7d813f53aa9abb')
version('7.8.1', sha256='6ae578fd67afcce7abec4ba4505dcc55b3d2dfe0ca46b99d966cb148c654abb3')
version('7.8.0', sha256='4b1192294e959ffd962282344e4ff325c4472f73abe605e246a1da3beda7ccfa')
version('7.6.1', sha256='9e25c99cafd16ed8f5e2dca75b5a10dc2af0568dbedf3fc39f1c5a0a9c840b0b')
version('7.4.4', sha256='96a39e273103f7375a670eba94fa3e5dad2819c5c5664c9aee8f145882a94e8c')
version('7.4.3', sha256='004e65693ee97fd4d5dc7ad244e3286a115dccd88964d04be61c07db6574b399')
version('7.4.2', sha256='18eb19bc0aa4cd7be3f30f79ac83f9d0a29c63657f4c1b05bf4c5d5d57a8f46d')
version('7.4.1', sha256='560b8669caaafa9e8dbd4bbf2b4b4bbab7dca1cc46ee828eaf26c744fe0635fc')
version('7.4.0', sha256='cb6fa188e030a3a447fc5451fbe0ecbeb4069ee2fd1bf52ed8e40e9b89e293cc')
variant('cxx', default=True, description='Support C++ functionality')
variant('tiff', default=False, description='Support TIFF functionality')
variant('png', default=False, description='Support PNG functionality')
variant('postgres', default=False, description='Support PostgreSQL functionality')
variant('mysql', default=False, description='Support MySQL functionality')
variant('sqlite', default=False, description='Support SQLite functionality')
variant('opengl', default=False, description='Support OpenGL functionality')
variant('odbc', default=False, description='Support ODBC functionality')
variant('fftw', default=False, description='Support FFTW functionality')
variant('blas', default=False, description='Support BLAS functionality')
variant('lapack', default=False, description='Support LAPACK functionality')
variant('cairo', default=False, description='Support Cairo functionality')
variant('freetype', default=False, description='Support FreeType functionality')
variant('readline', default=False, description='Support Readline functionality')
variant('regex', default=False, description='Support regex functionality')
variant('pthread', default=False, description='Support POSIX threads functionality')
variant('openmp', default=False, description='Support OpenMP functionality')
variant('opencl', default=False, description='Support OpenCL functionality')
variant('bzlib', default=False, description='Support BZIP2 functionality')
variant('zstd', default=False, description='Support Zstandard functionality')
variant('gdal', default=True, description='Enable GDAL/OGR support')
variant('liblas', default=False, description='Enable libLAS support')
variant('wxwidgets', default=False, description='Enable wxWidgets support')
variant('netcdf', default=False, description='Enable NetCDF support')
variant('geos', default=False, description='Enable GEOS support')
variant('x', default=False, description='Use the X Window System')
# http://htmlpreview.github.io/?https://github.com/OSGeo/grass/blob/master/REQUIREMENTS.html
# General requirements
depends_on('gmake@3.81:', type='build')
depends_on('iconv')
depends_on('zlib')
depends_on('flex', type='build')
depends_on('bison', type='build')
depends_on('proj')
depends_on('proj@:4', when='@:7.5')
# GRASS 7.8.0 was supposed to support PROJ 6, but it still checks for
# share/proj/epsg, which was removed in PROJ 6
depends_on('proj@:5', when='@:7.8.0')
# PROJ6 support released in GRASS 7.8.1
# https://courses.neteler.org/grass-gis-7-8-1-released-with-proj-6-and-gdal-3-support/
depends_on('proj@6:', when='@7.8.1:')
depends_on('python@2.7:', type=('build', 'run'))
depends_on('python@2.7:2.8', when='@:7.6', type=('build', 'run'))
depends_on('py-six', when='@7.8:', type=('build', 'run'))
# Optional packages
depends_on('libtiff', when='+tiff')
depends_on('libpng', when='+png')
depends_on('postgresql', when='+postgres')
depends_on('mariadb', when='+mysql')
depends_on('sqlite', when='+sqlite')
depends_on('gl', when='+opengl')
depends_on('unixodbc', when='+odbc')
depends_on('fftw', when='+fftw')
depends_on('blas', when='+blas')
depends_on('lapack', when='+lapack')
depends_on('cairo@1.5.8:', when='+cairo')
depends_on('freetype', when='+freetype')
depends_on('readline', when='+readline')
depends_on('opencl', when='+opencl')
depends_on('bzip2', when='+bzlib')
depends_on('zstd', when='+zstd')
depends_on('gdal', when='+gdal') # required?
depends_on('liblas', when='+liblas')
depends_on('wxwidgets', when='+wxwidgets')
depends_on('py-wxpython@2.8.10.1:', when='+wxwidgets', type=('build', 'run'))
depends_on('netcdf-c', when='+netcdf')
depends_on('geos', when='+geos')
depends_on('libx11', when='+x')
def url_for_version(self, version):
url = "https://grass.osgeo.org/grass{0}/source/grass-{1}.tar.gz"
return url.format(version.up_to(2).joined, version)
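# For example, version 7.8.2 yields
# https://grass.osgeo.org/grass78/source/grass-7.8.2.tar.gz, matching the
# `url` attribute above.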
# https://grasswiki.osgeo.org/wiki/Compile_and_Install
def configure_args(self):
spec = self.spec
args = [
'--without-nls',
# TODO: add packages for these optional dependencies
'--without-opendwg',
'--without-pdal',
'--with-proj-share={0}'.format(spec['proj'].prefix.share.proj),
]
if '+cxx' in spec:
args.append('--with-cxx')
else:
args.append('--without-cxx')
if '+tiff' in spec:
args.append('--with-tiff')
else:
args.append('--without-tiff')
if '+png' in spec:
args.append('--with-png')
else:
args.append('--without-png')
if '+postgres' in spec:
args.append('--with-postgres')
else:
args.append('--without-postgres')
if '+mysql' in spec:
args.append('--with-mysql')
else:
args.append('--without-mysql')
if '+sqlite' in spec:
args.append('--with-sqlite')
else:
args.append('--without-sqlite')
if '+opengl' in spec:
args.append('--with-opengl')
else:
args.append('--without-opengl')
if '+odbc' in spec:
args.append('--with-odbc')
else:
args.append('--without-odbc')
if '+fftw' in spec:
args.append('--with-fftw')
else:
args.append('--without-fftw')
if '+blas' in spec:
args.append('--with-blas')
else:
args.append('--without-blas')
if '+lapack' in spec:
args.append('--with-lapack')
else:
args.append('--without-lapack')
if '+cairo' in spec:
args.append('--with-cairo')
else:
args.append('--without-cairo')
if '+freetype' in spec:
args.append('--with-freetype')
else:
args.append('--without-freetype')
if '+readline' in spec:
args.append('--with-readline')
else:
args.append('--without-readline')
if '+regex' in spec:
args.append('--with-regex')
else:
args.append('--without-regex')
if '+pthread' in spec:
args.append('--with-pthread')
else:
args.append('--without-pthread')
if '+openmp' in spec:
args.append('--with-openmp')
else:
args.append('--without-openmp')
if '+opencl' in spec:
args.append('--with-opencl')
else:
args.append('--without-opencl')
if '+bzlib' in spec:
args.append('--with-bzlib')
else:
args.append('--without-bzlib')
if '+zstd' in spec:
args.append('--with-zstd')
else:
args.append('--without-zstd')
if '+gdal' in spec:
args.append('--with-gdal={0}/gdal-config'.format(
spec['gdal'].prefix.bin))
else:
args.append('--without-gdal')
if '+liblas' in spec:
args.append('--with-liblas={0}/liblas-config'.format(
spec['liblas'].prefix.bin))
else:
args.append('--without-liblas')
if '+wxwidgets' in spec:
args.append('--with-wxwidgets={0}/wx-config'.format(
spec['wxwidgets'].prefix.bin))
else:
args.append('--without-wxwidgets')
if '+netcdf' in spec:
args.append('--with-netcdf={0}/bin/nc-config'.format(
spec['netcdf-c'].prefix))
else:
args.append('--without-netcdf')
if '+geos' in spec:
args.append('--with-geos={0}/bin/geos-config'.format(
spec['geos'].prefix))
else:
args.append('--without-geos')
if '+x' in spec:
args.append('--with-x')
else:
args.append('--without-x')
return args
# see issue: https://github.com/spack/spack/issues/11325
# 'Platform.make' is created by the configure step,
# hence this fix is invoked afterwards
@run_after('configure')
def fix_iconv_linking(self):
if self.spec['iconv'].name != 'libiconv':
return
makefile = FileFilter('include/Make/Platform.make')
makefile.filter(r'^ICONVLIB\s*=.*', 'ICONVLIB = -liconv')
|
iulian787/spack
|
var/spack/repos/builtin/packages/grass/package.py
|
Python
|
lgpl-2.1
| 10,428
|
[
"NetCDF"
] |
3c0343956d1c70f5ab3db99b4fc7b778f8124ccfc322a4f8fffdc8a91b7850b3
|
__source__ = 'https://leetcode.com/problems/course-schedule/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/course-schedule.py
# Time: O(|V| + |E|)
# Space: O(|E|)
#
# Description: Leetcode # 207. Course Schedule
#
# There are a total of n courses you have to take, labeled from 0 to n - 1.
#
# Some courses may have prerequisites, for example to take course 0
# you have to first take course 1, which is expressed as a pair: [0,1]
#
# Given the total number of courses and a list of prerequisite pairs,
# is it possible for you to finish all courses?
#
# For example:
#
# 2, [[1,0]]
# There are a total of 2 courses to take. To take course 1
# you should have finished course 0. So it is possible.
#
# 2, [[1,0],[0,1]]
# There are a total of 2 courses to take. To take course 1 you should have
# finished course 0, and to take course 0 you should also have finished course 1. So it is impossible.
#
# click to show more hints.
#
# Hints:
# This problem is equivalent to finding if a cycle exists in a directed graph.
# If a cycle exists, no topological ordering exists and therefore it will be impossible to take all courses.
# There are several ways to represent a graph. For example, the input prerequisites is a graph represented by
# a list of edges. Is this graph representation appropriate?
# Topological Sort via DFS - A great video tutorial (21 minutes) on Coursera explaining the basic concepts
# of Topological Sort.
# Topological sort could also be done via BFS.
#
# Companies
# Apple Yelp Zenefits
# Related Topics
# Depth-first Search Breadth-first Search Graph Topological Sort
# Similar Questions
# Course Schedule II Graph Valid Tree Minimum Height Trees Course Schedule III
#
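# Worked example (illustrative): numCourses = 4, prerequisites = [[1,0],[2,0],[3,1],[3,2]]
# adjacency: 0 -> [1, 2], 1 -> [3], 2 -> [3]; indegrees: [0, 1, 1, 2]
# BFS: taking course 0 drops the indegrees of 1 and 2 to 0; taking 1 and 2
# drops the indegree of 3 to 0. All 4 courses are processed, so the answer is True.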
import collections
import unittest
# http://algobox.org/course-schedule/
class Solution(object):
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
from collections import deque
n = numCourses
ind = [[] for _ in xrange(n)]  # adjacency list: courses unlocked by each course
ind_cnt = [0] * n  # indegree: number of unmet prerequisites per course
for p in prerequisites:
ind_cnt[p[0]] += 1
ind[p[1]].append(p[0])
dq = deque()
for i in xrange(n):
if ind_cnt[i] == 0:
dq.append(i)
k = 0
while dq:
x = dq.popleft()
k += 1
for i in ind[x]:
ind_cnt[i] -= 1
if ind_cnt[i] == 0:
dq.append(i)
return k == n
class Solution2(object):
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
if not prerequisites or numCourses == 0:
return True
indegree = [ 0 for x in xrange(numCourses)]
queue = []
for p, q in prerequisites:
indegree[p] += 1
for i in xrange(len(indegree)):
if indegree[i] == 0:
queue.append(i)
while queue:
cur = queue[0]
queue.pop(0)
for p, q in prerequisites:
if q == cur:
indegree[p] -= 1
if indegree[p] == 0:
queue.append(p)
for i in xrange(len(indegree)):
if indegree[i] > 0:
return False
return True
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
print Solution().canFinish(1, [])
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
# BFS //aka traditional topological sort
# 7ms 89.50%
class Solution {
public boolean canFinish(int numCourses, int[][] prerequisites) {
Map<Integer, List<Integer>> graph = new HashMap<>();
int[] degrees = new int[numCourses];
Queue<Integer> queue = new LinkedList<>();
int count = numCourses;
for (int i = 0; i < numCourses; i++) {
graph.put(i, new ArrayList<>());
}
for (int[] prerequisite : prerequisites) {
graph.get(prerequisite[1]).add(prerequisite[0]);
degrees[prerequisite[0]]++; // increment the indegree of the dependent course (index 0, not index 1)
}
for (int i = 0; i < numCourses; i++) {
if (degrees[i] == 0) {
queue.add(i);
count--; // alternative (i): start count at 0 instead of numCourses
}
}
while (!queue.isEmpty()) {
int cur = queue.poll(); // alternative (ii): count++ here and finally verify count == numCourses
for (int next : graph.get(cur)) {
degrees[next]--;
if (degrees[next] == 0) {
queue.add(next);
count--;
}
}
}
return count == 0;
}
}
# use array
# 3ms 100%
class Solution {
public boolean canFinish(int numCourses, int[][] prerequisites) {
if(numCourses == 0 || prerequisites == null || prerequisites.length == 0) return true;
int[] degree = new int[numCourses];
int[] point = new int[numCourses];
int[] next = new int[prerequisites.length + 1];
for (int i = 0; i < prerequisites.length; i++) {
next[i + 1] = point[prerequisites[i][1]];
point[prerequisites[i][1]] = i + 1;
degree[prerequisites[i][0]]++;
}
int head = 0, tail = 0;
int[] q = new int[numCourses];
for (int i =0 ; i < numCourses; i++) {
if (degree[i] == 0) {
q[tail] = i;
tail++;
}
}
if (tail == numCourses) return true;
while (head < tail) {
int l = point[q[head]];
while (l != 0) {
degree[prerequisites[l - 1][0]]--;
if (degree[prerequisites[l - 1][0]]== 0) {
q[tail] = prerequisites[l-1][0];
tail++;
if (tail == numCourses) return true;
}
l = next[l];
}
head++;
}
return false;
}
}
# Topological Sort // either p[0] or p[1] can serve as the map key, but the indegree is incremented on the value side, not the key side
ex:
for(int[] p : prerequisites){
ind_cnt[p[1]] += 1;
ind.get(p[0]).add(p[1]);
}
for(int[] p : prerequisites){
ind_cnt[p[0]] += 1;
ind.get(p[1]).add(p[0]);
}
# 8ms 84.50%
class Solution {
public boolean canFinish(int numCourses, int[][] prerequisites) {
//The ind_cnt[i] is the number of prereqs for course i
//aka indegree count
int[] ind_cnt = new int[numCourses];
List<List<Integer>> ind = new ArrayList<>();
for(int i = 0; i < numCourses ; i++){
ind.add(new ArrayList<>());
}
for(int[] p : prerequisites){
ind_cnt[p[0]] += 1;
ind.get(p[1]).add(p[0]);
}
Queue<Integer> queue = new ArrayDeque<>();
for(int i = 0; i < numCourses ; i++){
if(ind_cnt[i] == 0){
queue.offer(i);
}
}
int k = 0;
//for (;!queue.isEmpty();++k)
// for (int i : ind.get(queue.poll())) if (--oud[i] == 0) queue.offer(i);
while(!queue.isEmpty()){
int i = queue.poll();
k += 1;
for(int j : ind.get(i)){
ind_cnt[j] -= 1;
if(ind_cnt[j] == 0){
queue.offer(j);
}
}
}
return k == numCourses;
}
}
# DFS //note, either p[0], p[1] can be key
# 7ms 89.50%
public class Solution {
public boolean canFinish(int numCourses, int[][] prerequisites) {
if(numCourses == 0 || prerequisites == null || prerequisites.length == 0) return true;
int[] visited = new int[numCourses];
Map<Integer, List<Integer>> map = new HashMap<>();
for(int[] p : prerequisites){
if(! map.containsKey(p[1])){ //note, either p[0], p[1] can be key
map.put(p[1], new ArrayList());
}
map.get(p[1]).add(p[0]);
}
for(int i = 0; i < numCourses ; i++){
if(!dfs(visited, map, i)){
return false;
}
}
return true;
}
//During recursion, if we follow a back edge which points to a previous node which is being visited,
// then we find a cycle. Return false
private boolean dfs(int[] visited, Map<Integer, List<Integer>> map, int i ){
if(visited[i] == -1){ // -1: node is on the current DFS path, so this back edge closes a cycle
return false;
}
if(visited[i] == 1){ // 1: node already fully processed, no cycle reachable from it
return true;
}
visited[i] = -1; // mark as being visited (on the current DFS path)
if(map.containsKey(i)){
for(int val: map.get(i)){
if(!dfs(visited, map, val)){
return false;
}
}
}
visited[i] = 1; // mark as fully processed
return true;
}
}
# 11ms 70.61%
class Solution {
public boolean canFinish(int numCourses, int[][] prerequisites) {
List<List<Integer>> graph = new ArrayList<>();
int[] degrees = new int[numCourses];
Queue<Integer> queue = new LinkedList<>();
buildGraph(numCourses, prerequisites, graph, degrees);
for (int i = 0; i < numCourses; i++) {
if (degrees[i] == 0) {
queue.add(i);
}
}
while (!queue.isEmpty()) {
int cur = queue.poll();
for (int next : graph.get(cur)) {
degrees[next]--;
if (degrees[next] == 0) {
queue.add(next);
}
}
}
for (int i = 0; i < numCourses; i++) {
if (degrees[i] != 0) {
return false;
}
}
return true;
}
private void buildGraph(int numCourses, int[][] prerequisites, List<List<Integer>> graph, int[] degrees) {
for (int i = 0; i < numCourses; i++) {
graph.add(new ArrayList<>());
}
for (int[] edge : prerequisites) {
graph.get(edge[1]).add(edge[0]);
degrees[edge[0]]++;
}
}
}
'''
|
JulyKikuAkita/PythonPrac
|
cs15211/CourseSchedule.py
|
Python
|
apache-2.0
| 10,360
|
[
"VisIt"
] |
8bfdf3de8620b676c8383f6d2edb2e48155d6ae142c074e50d18642551f02111
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
def GetRGBColor(colorName):
'''
Return the red, green and blue components for a
color as doubles.
'''
rgb = [0.0, 0.0, 0.0] # black
vtk.vtkNamedColors().GetColorRGB(colorName, rgb)
return rgb
NUMBER_OF_PIECES = 5
# Generate implicit model of a sphere
#
# Create renderer stuff
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create pipeline that handles ghost cells
sphere = vtk.vtkSphereSource()
sphere.SetRadius(3)
sphere.SetPhiResolution(100)
sphere.SetThetaResolution(150)
# Just playing with an alternative that is not currently used.
def NotUsed ():
# This filter actually spoils the example because it asks
# for the whole input.
# The only reason it is here is because sphere complains
# it cannot generate ghost cells.
piece = vtk.vtkExtractPolyDataPiece()
piece.SetInputConnection(sphere.GetOutputPort())
# purposely put seams in here.
piece.CreateGhostCellsOff()
pdn = vtk.vtkPolyDataNormals()
pdn.SetInputConnection(piece.GetOutputPort())
# Just playing with an alternative that is not currently used.
deci = vtk.vtkDecimatePro()
deci.SetInputConnection(sphere.GetOutputPort())
# this did not remove seams as I thought it would
deci.BoundaryVertexDeletionOff()
# deci.PreserveTopologyOn()
# Since quadric Clustering does not handle borders properly yet,
# the pieces will have dramatic "seams"
q = vtk.vtkQuadricClustering()
q.SetInputConnection(sphere.GetOutputPort())
q.SetNumberOfXDivisions(5)
q.SetNumberOfYDivisions(5)
q.SetNumberOfZDivisions(10)
q.UseInputPointsOn()
streamer = vtk.vtkPolyDataStreamer()
# streamer.SetInputConnection(deci.GetOutputPort())
streamer.SetInputConnection(q.GetOutputPort())
# streamer.SetInputConnection(pdn.GetOutputPort())
streamer.SetNumberOfStreamDivisions(NUMBER_OF_PIECES)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(streamer.GetOutputPort())
mapper.ScalarVisibilityOff()
mapper.SetPiece(0)
mapper.SetNumberOfPieces(2)
mapper.ImmediateModeRenderingOn()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(GetRGBColor('english_red'))
# Add the actors to the renderer, set the background and size
#
ren1.GetActiveCamera().SetPosition(5, 5, 10)
ren1.GetActiveCamera().SetFocalPoint(0, 0, 0)
ren1.AddActor(actor)
ren1.SetBackground(1, 1, 1)
renWin.SetSize(300, 300)
iren.Initialize()
# render the image
#iren.Start()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Core/Testing/Python/StreamPolyData.py
|
Python
|
gpl-3.0
| 2,667
|
[
"VTK"
] |
864bbb79919c2d989531f611000b3d0c6a000c5e765da205aaa7cebdc35b7aa1
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.test import TestCase
from zilencer.models import Deployment
from zerver.models import (
get_realm, get_user_profile_by_email,
PreregistrationUser, Realm, ScheduledJob, UserProfile,
)
from zerver.lib.actions import (
create_stream_if_needed,
do_add_subscription,
set_default_streams,
)
from zerver.lib.digest import send_digest_email
from zerver.lib.notifications import enqueue_welcome_emails, one_click_unsubscribe_link
from zerver.lib.test_helpers import AuthedTestCase, find_key_by_email, queries_captured
from zerver.lib.test_runner import slow
from zerver.lib.session_user import get_session_dict_user
import re
import ujson
from urlparse import urlparse
class PublicURLTest(TestCase):
"""
Account creation URLs are accessible even when not logged in; URLs that
require authentication redirect or return an error status instead.
"""
def fetch(self, method, urls, expected_status):
for url in urls:
if method == "get":
response = self.client.get(url)
else:
response = self.client.post(url)
self.assertEqual(response.status_code, expected_status,
msg="Expected %d, received %d for %s to %s" % (
expected_status, response.status_code, method, url))
def test_public_urls(self):
"""
Test which views are accessible when not logged in.
"""
# FIXME: We should also test the Tornado URLs -- this codepath
# can't do so because this Django test mechanism doesn't go
# through Tornado.
get_urls = {200: ["/accounts/home/", "/accounts/login/"],
302: ["/"],
401: ["/api/v1/streams/Denmark/members",
"/api/v1/users/me/subscriptions",
"/api/v1/messages",
],
}
post_urls = {200: ["/accounts/login/"],
302: ["/accounts/logout/"],
401: ["/json/get_public_streams",
"/json/get_old_messages",
"/json/update_pointer",
"/json/send_message",
"/json/invite_users",
"/json/settings/change",
"/json/subscriptions/remove",
"/json/subscriptions/exists",
"/json/subscriptions/add",
"/json/subscriptions/property",
"/json/get_subscribers",
"/json/fetch_api_key",
"/api/v1/users/me/subscriptions",
],
400: ["/api/v1/send_message",
"/api/v1/external/github",
"/api/v1/fetch_api_key",
],
}
for status_code, url_set in get_urls.iteritems():
self.fetch("get", url_set, status_code)
for status_code, url_set in post_urls.iteritems():
self.fetch("post", url_set, status_code)
def test_get_gcid_when_not_configured(self):
with self.settings(GOOGLE_CLIENT_ID=None):
resp = self.client.get("/api/v1/fetch_google_client_id")
self.assertEquals(400, resp.status_code,
msg="Expected 400, received %d for GET /api/v1/fetch_google_client_id" % resp.status_code,
)
data = ujson.loads(resp.content)
self.assertEqual('error', data['result'])
def test_get_gcid_when_configured(self):
with self.settings(GOOGLE_CLIENT_ID="ABCD"):
resp = self.client.get("/api/v1/fetch_google_client_id")
self.assertEquals(200, resp.status_code,
msg="Expected 200, received %d for GET /api/v1/fetch_google_client_id" % resp.status_code,
)
data = ujson.loads(resp.content)
self.assertEqual('success', data['result'])
self.assertEqual('ABCD', data['google_client_id'])
class LoginTest(AuthedTestCase):
"""
Logging in, registration, and logging out.
"""
def test_login(self):
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_bad_password(self):
self.login("hamlet@zulip.com", "wrongpassword")
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_nonexist_user(self):
result = self.login("xxx@zulip.com", "xxx")
self.assertIn("Please enter a correct email and password", result.content)
def test_register(self):
realm = get_realm("zulip.com")
streams = ["stream_%s" % i for i in xrange(40)]
for stream in streams:
create_stream_if_needed(realm, stream)
set_default_streams(realm, streams)
with queries_captured() as queries:
self.register("test", "test")
# Ensure the number of queries we make is not O(streams)
self.assert_length(queries, 67)
user_profile = get_user_profile_by_email('test@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_register_deactivated(self):
"""
If you try to register for a deactivated realm, you get a clear error
page.
"""
realm = get_realm("zulip.com")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.register("test", "test")
self.assertIn("has been deactivated", result.content.replace("\n", " "))
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_email('test@zulip.com')
def test_login_deactivated(self):
"""
If you try to log in to a deactivated realm, you get a clear error page.
"""
realm = get_realm("zulip.com")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.login("hamlet@zulip.com")
self.assertIn("has been deactivated", result.content.replace("\n", " "))
def test_logout(self):
self.login("hamlet@zulip.com")
self.client.post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
def test_non_ascii_login(self):
"""
You can log in even if your password contains non-ASCII characters.
"""
email = "test@zulip.com"
password = u"hümbüǵ"
# Registering succeeds.
self.register("test", password)
user_profile = get_user_profile_by_email(email)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.client.post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
# Logging in succeeds.
self.client.post('/accounts/logout/')
self.login(email, password)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_register_first_user_with_invites(self):
"""
The first user in a realm has a special step in their signup workflow
for inviting coworkers. Do as realistic an end-to-end test as we can
without Tornado running.
"""
username = "user1"
password = "test"
domain = "test.com"
email = "user1@test.com"
# Create a new realm to ensure that we're the first user in it.
Realm.objects.create(domain=domain, name="Test Inc.")
# Start the signup process by supplying an email address.
result = self.client.post('/accounts/home/', {'email': email})
# Check the redirect telling you to check your mail for a confirmation
# link.
self.assertEquals(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s@%s" % (username, domain)))
result = self.client.get(result["Location"])
self.assertIn("Check your email so we can get started.", result.content)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client.get(confirmation_url)
self.assertEquals(result.status_code, 200)
# Pick a password and agree to the ToS.
result = self.submit_reg_form_for_user(username, password, domain)
self.assertEquals(result.status_code, 302)
self.assertTrue(result["Location"].endswith("/invite/"))
# Invite coworkers to join you.
result = self.client.get(result["Location"])
self.assertIn("You're the first one here!", result.content)
# Reset the outbox for our invites.
outbox.pop()
invitees = ['alice@' + domain, 'bob@' + domain]
params = {
'invitee_emails': ujson.dumps(invitees)
}
result = self.client.post('/json/bulk_invite_users', params)
self.assert_json_success(result)
# We really did email these users, and they have PreregistrationUser
# objects.
email_recipients = [message.recipients()[0] for message in outbox]
self.assertEqual(len(outbox), len(invitees))
self.assertItemsEqual(email_recipients, invitees)
user_profile = get_user_profile_by_email(email)
self.assertEqual(len(invitees), PreregistrationUser.objects.filter(
referred_by=user_profile).count())
# After this we start manipulating browser information, so stop here.
class InviteUserTest(AuthedTestCase):
def invite(self, users, streams):
"""
Invites the specified users to Zulip with the specified streams.
users should be a string containing the users to invite, comma or
newline separated.
streams should be a list of strings.
"""
return self.client.post("/json/invite_users",
{"invitee_emails": users,
"stream": streams})
def check_sent_emails(self, correct_recipients):
from django.core.mail import outbox
self.assertEqual(len(outbox), len(correct_recipients))
email_recipients = [email.recipients()[0] for email in outbox]
self.assertItemsEqual(email_recipients, correct_recipients)
def test_bulk_invite_users(self):
# The bulk_invite_users code path is for the first user in a realm.
self.login('hamlet@zulip.com')
invitees = ['alice@zulip.com', 'bob@zulip.com']
params = {
'invitee_emails': ujson.dumps(invitees)
}
result = self.client.post('/json/bulk_invite_users', params)
self.assert_json_success(result)
self.check_sent_emails(invitees)
def test_successful_invite_user(self):
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
def test_multi_user_invite(self):
"""
Invites multiple users with a variety of delimiters.
"""
self.login("hamlet@zulip.com")
# Intentionally use a weird string.
self.assert_json_success(self.invite(
"""bob-test@zulip.com, carol-test@zulip.com,
dave-test@zulip.com
earl-test@zulip.com""", ["Denmark"]))
for user in ("bob", "carol", "dave", "earl"):
self.assertTrue(find_key_by_email("%s-test@zulip.com" % user))
self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
"dave-test@zulip.com", "earl-test@zulip.com"])
def test_missing_or_invalid_params(self):
"""
Tests inviting with various missing or invalid parameters.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(
self.client.post("/json/invite_users", {"invitee_emails": "foo@zulip.com"}),
"You must specify at least one stream for invitees to join.")
for address in ("noatsign.com", "outsideyourdomain@example.net"):
self.assert_json_error(
self.invite(address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
self.check_sent_emails([])
def test_invalid_stream(self):
"""
Tests inviting to a non-existent stream.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(self.invite("iago-test@zulip.com", ["NotARealStream"]),
"Stream does not exist: NotARealStream. No invites were sent.")
self.check_sent_emails([])
def test_invite_existing_user(self):
"""
If you invite an address already using Zulip, no invitation is sent.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(
self.client.post("/json/invite_users",
{"invitee_emails": "hamlet@zulip.com",
"stream": ["Denmark"]}),
"We weren't able to invite anyone.")
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email="hamlet@zulip.com"))
self.check_sent_emails([])
def test_invite_some_existing_some_new(self):
"""
If you invite a mix of already existing and new users, invitations are
only sent to the new users.
"""
self.login("hamlet@zulip.com")
existing = ["hamlet@zulip.com", "othello@zulip.com"]
new = ["foo-test@zulip.com", "bar-test@zulip.com"]
result = self.client.post("/json/invite_users",
{"invitee_emails": "\n".join(existing + new),
"stream": ["Denmark"]})
self.assert_json_error(result,
"Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
# We only created accounts for the new users.
for email in existing:
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email=email))
for email in new:
self.assertTrue(PreregistrationUser.objects.get(email=email))
# We only sent emails to the new users.
self.check_sent_emails(new)
def test_invite_outside_domain_in_closed_realm(self):
"""
In a realm with `restricted_to_domain = True`, you can't invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm("zulip.com")
zulip_realm.restricted_to_domain = True
zulip_realm.save()
self.login("hamlet@zulip.com")
external_address = "foo@example.com"
self.assert_json_error(
self.invite(external_address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
@slow(0.20, 'inviting is slow')
def test_invite_outside_domain_in_open_realm(self):
"""
In a realm with `restricted_to_domain = False`, you can invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm("zulip.com")
zulip_realm.restricted_to_domain = False
zulip_realm.save()
self.login("hamlet@zulip.com")
external_address = "foo@example.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
def test_invite_with_non_ascii_streams(self):
"""
Inviting someone to streams with non-ASCII characters succeeds.
"""
self.login("hamlet@zulip.com")
invitee = "alice-test@zulip.com"
stream_name = u"hümbüǵ"
realm = get_realm("zulip.com")
stream, _ = create_stream_if_needed(realm, stream_name)
# Make sure we're subscribed before inviting someone.
do_add_subscription(
get_user_profile_by_email("hamlet@zulip.com"),
stream, no_log=True)
self.assert_json_success(self.invite(invitee, [stream_name]))
class EmailUnsubscribeTests(AuthedTestCase):
def test_missedmessage_unsubscribe(self):
"""
We provide one-click unsubscribe links in missed message
e-mails that you can click even when logged out to update your
email notification settings.
"""
user_profile = get_user_profile_by_email("hamlet@zulip.com")
user_profile.enable_offline_email_notifications = True
user_profile.save()
unsubscribe_link = one_click_unsubscribe_link(user_profile,
"missed_messages")
result = self.client.get(urlparse(unsubscribe_link).path)
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
self.assertFalse(user_profile.enable_offline_email_notifications)
def test_welcome_unsubscribe(self):
"""
We provide one-click unsubscribe links in welcome e-mails that you can
click even when logged out to stop receiving them.
"""
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email("hamlet@zulip.com")
# Simulate a new user signing up, which enqueues 2 welcome e-mails.
enqueue_welcome_emails(email, "King Hamlet")
self.assertEqual(2, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from the welcome e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
result = self.client.get(urlparse(unsubscribe_link).path)
# The welcome email jobs are no longer scheduled.
self.assertEqual(result.status_code, 200)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
def test_digest_unsubscribe(self):
"""
We provide one-click unsubscribe links in digest e-mails that you can
click even when logged out to stop receiving them.
Unsubscribing from these emails also dequeues any digest email jobs that
have been queued.
"""
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email("hamlet@zulip.com")
self.assertTrue(user_profile.enable_digest_emails)
# Enqueue a fake digest email.
send_digest_email(user_profile, "", "")
self.assertEqual(1, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from digest e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
result = self.client.get(urlparse(unsubscribe_link).path)
# The setting is toggled off, and scheduled jobs have been removed.
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
self.assertFalse(user_profile.enable_digest_emails)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
|
gkotian/zulip
|
zerver/test_signup.py
|
Python
|
apache-2.0
| 20,333
|
[
"VisIt"
] |
3d6a2c7ce48c1a3a17055284f9937438c0420909251c7a840ebcaa3233477485
|
import time
from base_handler import BlogHandler
from models import User
from utility import valid_email, valid_username, valid_password, hash_str
class RegistrationHandler(BlogHandler):
def get(self):
self.render("register.html")
def post(self):
has_error = False
error_list = []
fullname = self.request.get('fullname')
location = self.request.get('location')
email = self.request.get('email')
u_name = self.request.get('username')
password = self.request.get('password')
conf_pass = self.request.get('confirm_password')
if fullname and email and u_name and password and conf_pass and location:
if not valid_username(u_name):
    error_list.append('The Username is not valid! '
                      'Username should not have any special characters!')
    has_error = True
if not valid_email(email):
    error_list.append('The Email is not valid! '
                      'Email should be of the format: abc@example.com!')
    has_error = True
if not valid_password(password):
    error_list.append('Password should be longer than 3 '
                      'characters and shorter than 20 characters!')
has_error = True
elif password != conf_pass:
error_list.append("Both Passwords should match!")
has_error = True
user = User.query(User.email == email).get()
if user:
error_list.append("That email address already exists!")
has_error = True
user = User.query(User.user_name == u_name).get()
if user:
error_list.append('Sorry, that username is already taken! '
                  'Try another username!')
has_error = True
if has_error:
self.render('register.html', fullname=fullname,
location=location,
email=email,
user_name=u_name,
error_list=error_list)
else:
new_user = User(fullname=fullname,
location=location,
email=email,
user_name=u_name,
password=hash_str(password))
new_user_key = new_user.put()
# new_user_photo_obj = UserPhoto(user=new_user_key)
# new_user_photo_obj.put()
time.sleep(0.2)
# message = mail.EmailMessage(
# sender='hello@your-own-blog.appspotmail.com',
# subject="Thank You for Connecting with Us!")
# message.to = email
# message.body = """Dear %s:
# Your your-own-blog account has been approved. You can now visit
# http://your-own-blog.appspot.com and sign in using your Google Account to
# access new features.
# Please let us know if you have any questions.
# The your-own-blog Team
# """ % (fullname)
# message.send()
self.render('login.html', new_user=new_user.fullname)
else:
error_list.append("You did not fill atleast one of the fields!")
self.render('register.html', fullname=fullname,
location=location,
email=email,
user_name=u_name,
error_list=error_list)
|
ghoshabhi/Multi-User-Blog
|
handlers/registration_handler.py
|
Python
|
mit
| 3,696
|
[
"VisIt"
] |
c7b711527d7dc61aa3398915f9cc80222018d64b97b978e1bfb40c27bb869535
|
import unittest
from chartbeatpy import Chartbeat
class ChartbeatTest(unittest.TestCase):
def setUp(self):
self.beat = Chartbeat("317a25eccba186e0f6b558f45214c0e7", "avc.com")
self.keys = [
Chartbeat.READ, Chartbeat.WRITE, Chartbeat.IDLE
]
def test_histogram(self):
data = self.beat.histogram(keys=self.keys, breaks=[1, 5, 10])
for key in self.keys:
self.assertTrue(key in data)
def test_path_summary(self):
data = self.beat.path_summary(keys=self.keys, types=["n", "n", "s"])
for path in data:
for key in self.keys:
self.assertTrue(key in data[path])
def test_quickstats(self):
data = self.beat.quickstats()
self.assertTrue("visit" in data)
def test_api_version(self):
altbeat = Chartbeat("317a25eccba186e0f6b558f45214c0e7", "avc.com", api_version='3')
data = altbeat.quickstats()
self.assertTrue("visit" in data)
def test_geo(self):
data = self.beat.geo()
self.assertTrue("lat_lngs" in data)
def test_summary(self):
data = self.beat.summary(keys=["read", "write", "idle"])
self.assertTrue("read" in data)
self.assertTrue("write" in data)
self.assertTrue("idle" in data)
def test_recent(self):
data = self.beat.recent()
self.assertIsInstance(data, list)
def test_referrers(self):
data = self.beat.referrers()
self.assertTrue("referrers" in data)
def test_top_pages(self):
data = self.beat.top_pages()
self.assertIsInstance(data, list)
def test_engage_series(self):
data = self.beat.engage_series()
self.assertTrue("data" in data)
def test_engage_stats(self):
data = self.beat.engage_stats()
self.assertTrue("data" in data)
def test_social_series(self):
data = self.beat.social_series()
self.assertTrue("data" in data)
def test_social_stats(self):
data = self.beat.social_stats()
self.assertTrue("data" in data)
def test_traffic_series(self):
data = self.beat.traffic_series()
self.assertTrue("data" in data)
def test_traffic_stats(self):
data = self.beat.traffic_stats()
self.assertTrue("data" in data)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
michigan-com/chartbeatpy
|
tests/test_chartbeat.py
|
Python
|
mit
| 2,386
|
[
"VisIt"
] |
5a95c2ff64b2f1b9bf9cf5373bd46cc089f273e199e306d85854ce8b90674f92
|
# coding: utf8
from __future__ import unicode_literals, print_function, division
import re
from collections import defaultdict
from itertools import groupby, chain, combinations
import io
from clld.db.meta import DBSession
from clld.db.models import common
from cdk import models
SOURCE_MAP = {
'WΕR2': 'WER2',
'CHCC81': 'СНСС81',
'СНCC81': 'СНСС81',
'МКД': 'МКД1',
'CHCC76': 'СНСС76',
'WΕR1': 'WER1',
'СНСC72': 'СНСС72',
'CНCC72': 'СНСС72',
'CHCC72': 'СНСС72',
'КС': 'КСД',
'АК60': 'АК1',
'K67': 'К67',
'КТФ': 'КФТ',
'КСб13': 'КСб',
('МКД', '654'): ('МКД3', '111'),
('МКД', '147'): ('МКД2', '147'),
('КСб13', '74'): ('ПМБ', '238'),
}
POS = {
'?': '?',
'adj': 'adjective',
'adp': 'postposition',
'adv': 'adverb',
'anom': 'action nominal',
'an': 'animate',
'an pl': 'animate plural',
'conj': 'conjunction',
'f': 'feminine class',
'inan': 'inanimate',
'inan pl': 'inanimate plural',
'interj': 'interjection',
'm': 'masculine class',
'mn': 'masculine/neuter class',
'n': 'neuter class',
'n?': 'neuter class?',
'n/m': 'masculine/neuter class',
'm/n': 'masculine/neuter class',
'm/f': 'masculine/feminine class',
'm/f/n': 'masculine/feminine/neuter class',
'f/m': 'masculine/feminine class',
'f/n': 'feminine/neuter class',
'n/f': 'feminine/neuter class',
'no pl': 'no plural form',
'num': 'numeral',
'num an': 'animate numeral',
'num inan': 'inanimate numeral',
'pron attr': 'attributive pronoun',
'pron dem': 'demonstrative pronoun',
'pron indef': 'indefinite pronoun',
'pron indef m': 'masculine indefinite pronoun',
'pron indef f': 'feminine indefinite pronoun',
'pron indef an pl': 'animate plural indefinite pronoun',
'pron inter': 'interrogative pronoun',
'pron inter f': 'feminine interrogative pronoun',
'pron inter m': 'masculine interrogative pronoun',
'pron intens/refl': 'intensive/reflexive pronoun',
'pron pers': 'personal pronoun',
'pron poss': 'possessive pronoun',
'pron rel': 'relative pronoun',
'prtc': 'particle',
'prt\u0441': 'particle',
'pl': 'plural',
'pron': 'pronoun',
'suf': 'suffix',
'v': 'intransitive verb',
'v irr': 'irregular intransitive verb',
'v1': 'v1 - intransitive verb',
'v1/4': 'v1/4 - intransitive verb',
'v2': 'v2 - intransitive verb',
'v2/5': 'v2/5 - intransitive verb',
'v3': 'v3 - intransitive verb',
'v3~v5': 'v3~v5 - intransitive verb',
'v4': 'v4 - intransitive verb',
'v5': 'v5 - intransitive verb',
'vk': 'verb with incorporated subjects',
'vpred': 'verbal predicate',
'vt': 'transitive verb',
'vt irr': 'irregular transitive verb',
'vt1': 'vt1 - transitive verb',
'vt2': 'vt2 - transitive verb',
'vt2/4': 'vt2/4 - transitive verb',
'vt3': 'vt3 - transitive verb',
'vt4': 'vt4 - transitive verb',
}
ASPECTS = {
'caus mom': 'causative momentaneous verb',
'caus iter': 'causative iterative verb',
'': '',
}
DIALECTS = {
'cket': 'Central Ket',
'sket': 'Southern Ket',
'nket': 'Northern Ket',
}
DONORS = {
'nen': 'Nenets',
'rus': 'Russian',
'selk': 'Selkup',
'evenk': 'Evenk',
}
LOCATIONS = {
'leb': 'Lebed',
'imb': 'Imbat',
'bak': 'Baklanikha',
'baikh': 'Baikha',
'baikn': 'Baikha',
'baix': 'Baikha',
'bajk': 'Bajkit',
'bakht': 'Bakhta',
'bakh': 'Bakhta',
'baxt': 'Bakhta',
'bacht': 'Bakhta',
'el': 'Yelogui settlements',
'e.-o': 'Yenisei Ostyak',
'kel': 'Kellog',
'ke': 'Kellog',
'kur': 'Kurejka',
'mad': 'Madujka',
'pak': 'Pakulikha',
'al': 'Alinskoe',
'\u0430l': 'Alinskoe',
'ak': 'Baklanikha',
'ver': 'Vereshchagino',
'v-imb': 'Verkhne-Imbatsk',
'sul': 'Sulomaj',
'sum': 'Sumarokovo',
'sur': 'Surgutikha',
}
def string2regex(s):
return s.replace('.', '\\.').replace('-', '\\-')
DIS_ROMAN_PATTERN = re.compile(r'\s+(?P<marker>I+)\s*$')
DIS_ARABIC_PATTERN = re.compile(r'(?P<marker>[1-9]+)$')
DONOR_PATTERN = re.compile(r'\s*<(?P<name>%s)\.\s*>\s*' % '|'.join(DONORS.keys()))
# dialectal variants in braces:
VARIANTS_PATTERN = re.compile(r'\((%s)(\.|\s+)' % '|'.join(DIALECTS.keys()))
DIALECT_MARKER_PATTERN = re.compile(r'(?P<name>ket|%s)(\.\s*|\.?\s+)' % '|'.join(DIALECTS.keys()))
DIALECT_CHUNK_PATTERN = re.compile(r',\s*(?:%s)\.\s*' % '|'.join(DIALECTS.keys()))
LOC_PATTERN = re.compile(r'(?:(\]|\?|!|,|\s\s)\s*|^)(%s)\.,?\s+' %
                         '|'.join(string2regex(s)
                                  for s in chain(LOCATIONS.keys(), DIALECTS.keys(), ['ket'])))
SOURCE_PATTERN = re.compile(r'\s*\((?P<src>[^:\s\(\)]+):\s*(?P<pages>[^\)]+)(?:\)\s*$|\),?\s*)')
SOURCE_MARKER = re.compile(r'\s*\((?P<src>[^:\s\(\)]+):\s*(?P<pages>[^\)]+)\),?\s*')
MEANING_ID = 0
ENTRY_ID = 0
EXAMPLE_ID = 0
SOURCE_ID = 0
PROBLEMS = []
def in_brackets(s):
depth = 0
for index, c in enumerate(s):
if c == '(':
depth += 1
if c == ')':
depth -= 1
if depth == 0:
return s[1:index].strip(), s[index + 1:].strip()
return '', s
def yield_variants(s):
dialects, prev_dialects = [], []
for chunk in s.split(','):
chunk = chunk.strip()
if chunk in DIALECTS:
chunk += '.'
if DIALECT_MARKER_PATTERN.match(chunk):
dialect, form = [ss.strip() for ss in chunk.split('.', 1)]
dialects.append(dialect)
if form:
for dialect in dialects:
yield dialect, form
prev_dialects = dialects[:]
dialects = []
else:
# additional form for the last encountered dialect!
form = chunk
assert prev_dialects and form
for dialect in prev_dialects:
yield dialect, form
for dialect in dialects:
yield dialect, None
class Headword(object):
def __init__(self, headword):
self.donor, self.dialects, self.variants = None, [], defaultdict(list)
match = DONOR_PATTERN.search(headword)
if match:
self.donor = match.group('name')
headword = headword[:match.start()] + headword[match.end():]
match = VARIANTS_PATTERN.search(headword)
if match:
# get matching closing bracket!
variants, rem = in_brackets(headword[match.start():])
for i, (dialect, form) in enumerate(yield_variants(variants)):
if not form:
self.dialects.append(dialect)
else:
self.variants[dialect].append(form)
headword = headword[:match.start()] + rem
headword = re.sub(r'\s+', ' ', headword.strip())
self.disambiguation = ''
match = DIS_ROMAN_PATTERN.search(headword)
if match:
self.disambiguation = match.group('marker')
headword = headword[:match.start()].strip()
match = DIS_ARABIC_PATTERN.search(headword)
if match:
if self.disambiguation:
self.disambiguation = ' ' + self.disambiguation
self.disambiguation = match.group('marker') + self.disambiguation
headword = headword[:match.start()].strip()
variants = headword.split(' also ')
if len(variants) > 1:
headword = variants[0]
assert len(self.dialects) <= 1
self.variants[self.dialects[0] if self.dialects else None].extend(variants[1:])
self.form = headword
def yield_examples(s):
#
# FIXME: we must retain the first char of LOC_PATTERN if it is !, ?, ]
#
chunks = [(ss or '').strip() for ss in LOC_PATTERN.split(s)]
for i in range(1, len(chunks), 3):
try:
sep = chunks[i].strip()
if sep in '!?]':
chunks[i - 1] += sep
except IndexError:
pass
if chunks[0]:
for res in yield_cited_examples(chunks[0]):
yield res
local_examples = [chunks[i:i + 3] for i in range(1, len(chunks), 3)]
for i, (sep, dialect, chunk) in enumerate(local_examples):
parts = chunk.split(' ', 2)
if len(parts) == 1:
PROBLEMS.append(s)
parts.append('')
src_match = SOURCE_MARKER.search(parts[1])
if src_match:
src, pages = src_match.group('src'), src_match.group('pages')
rus = parts[1][:src_match.start()]
if parts[1][src_match.end():].strip():
parts.insert(2, parts[1][src_match.end():].strip())
else:
src, pages = None, None
rus = parts[1]
text = parts[0]
match = DIALECT_MARKER_PATTERN.match(text)
dialects = []
while match:
text = text[match.end():].strip()
dialects.append(match.group('name'))
match = DIALECT_MARKER_PATTERN.match(text)
for d in dialects:
yield d, text, rus, src, pages
yield dialect, text, rus, src, pages
if len(parts) > 2:
for res in yield_cited_examples(' '.join(parts[2:])):
yield res
def yield_cited_examples(s):
done = False
chunks = [ss.strip() for ss in SOURCE_PATTERN.split(s)]
if len(chunks) == 1:
cchunks = chunks[0].split(' ')
if len(cchunks) % 2 == 0:
for text, rus in [cchunks[i:i + 2] for i in range(0, len(cchunks), 2)]:
yield None, text, rus, None, None
done = True
if not done:
count, rem = divmod(len(chunks), 3)
try:
assert rem == 1 and not chunks[-1]
except AssertionError:
PROBLEMS.append(s)
yield None, chunks[-1], None, None, None
for chunk, src, pages in [chunks[i:i + 3] for i in range(0, count * 3, 3)]:
parts = chunk.split(' ')
while len(parts) % 2 == 0 and len(parts) > 2:
yield None, parts.pop(0), parts.pop(0), None, None
try:
assert len(parts) == 2
yield None, parts[0], parts[1], src, pages
except AssertionError:
PROBLEMS.append(s)
yield None, ' '.join(parts), None, src, pages
def get_entry(**kw):
global ENTRY_ID
ENTRY_ID += 1
kw['pos'] = POS[kw['pos']] if kw['pos'] else None
kw['donor'] = DONORS[kw['donor']] if kw['donor'] else None
return models.Entry(id=str(ENTRY_ID), **kw)
def load(data, reader, ket, contrib, verbs=True):
dis_arabic_pattern = re.compile(r'(?P<marker>[0-9]+)\.\s+')
global MEANING_ID
global EXAMPLE_ID
global SOURCE_ID
for headword, meanings in groupby(reader, lambda r: (r[0], r[1], r[2])):
meanings = list(meanings)
if not meanings:
continue
headword, pos, aspect_or_plural = headword
pos = pos.strip()
if (not pos and not headword) or (headword == 'lemma' and pos == 'POS'):
continue
assert (not pos) or (pos in POS), 'unknown pos: %s' % pos
headword = Headword(headword)
entries = []
kw = dict(
donor=headword.donor,
disambiguation=headword.disambiguation,
pos=pos,
variant=False,
aspect=aspect_or_plural if verbs else None,
plural=None if verbs else aspect_or_plural,)
if headword.dialects:
for dialect in headword.dialects:
entries.append(get_entry(
name=headword.form,
language=data['Language'][dialect],
**kw))
else:
entries.append(get_entry(name=headword.form, language=ket, **kw))
kw['variant'] = True
for dialect, forms in headword.variants.items():
for form in forms:
entries.append(get_entry(
name=form,
language=ket if dialect is None else data['Language'][dialect],
**kw))
DBSession.flush()
for e1, e2 in combinations(entries, 2):
DBSession.add(models.Variants(entry1=e1, entry2=e2))
for j, row in enumerate(meanings):
headword, pos, aspect, russian, german, english, description = row
match = dis_arabic_pattern.match(russian)
if match:
russian = russian[match.end():].strip()
meaning = data['Meaning'].get((russian, german, english))
if not meaning:
MEANING_ID += 1
meaning = data.add(
models.Meaning,
(russian, german, english),
id=str(MEANING_ID),
name=english,
russian=russian,
german=german,
english=english)
for entry in entries:
counterpart = common.UnitValue(
id='%s-%s' % (entry.id, j + 1),
name=entry.name,
description='%s / %s / %s' % (english, russian, german),
contribution=contrib,
unit=entry,
unitparameter=meaning)
for k, (loc, text, rus, source, pages) in enumerate(yield_examples(description.strip())):
example = data['Sentence'].get((text, rus, loc))
if example is None:
EXAMPLE_ID += 1
example = data.add(
common.Sentence,
(text, rus, loc),
id='%s' % EXAMPLE_ID,
language=data['Language'].get(loc, ket),
name=text,
description=rus)
if source:
if (source, pages) in SOURCE_MAP:
print(source, pages)
source, pages = SOURCE_MAP[(source, pages)]
else:
source = SOURCE_MAP.get(source, source)
src = data['Source'].get(source)
if not src:
print(source)
raise ValueError(source)
SOURCE_ID += 1
src = data.add(
common.Source, source,
id=str(SOURCE_ID),
name=source,
description=None)
DBSession.add(common.SentenceReference(
sentence=example, source=src, description=pages))
models.CounterpartExample(
location=LOCATIONS.get(loc, DIALECTS.get(loc)),
counterpart=counterpart,
sentence=example)
for entry in entries:
DBSession.add(entry)
with io.open('context-problems.txt', 'w', encoding='utf8') as fp:
fp.write('\n\n'.join(PROBLEMS))
|
clld/cdk
|
cdk/scripts/util.py
|
Python
|
apache-2.0
| 15,401
|
[
"CDK"
] |
bda6ed9606b29b49146ebc66bc800d2ef29601bb6b5dfa1d6fae75f7b19ae6ec
|
"""Workflow for collecting diffraction data from high quality crystals in a cassette."""
import csv
import sys
import math
import optparse
import os
import time
from datetime import datetime
"""
@begin simulate_data_collection @desc Workflow for collecting diffraction data from high quality crystals in a cassette.
@param cassette_id @desc The unique ID of the cassette containing the crystals.
@param sample_score_cutoff @desc The minimum quality score required of crystals.
@param data_redundancy @desc The desired redundancy of the data sets collected.
@in sample_spreadsheet @desc CSV file giving quality score for each crystal.
@in calibration_image @desc File used to correct raw diffraction images.
@out corrected_image @desc The corrected diffraction images collected on all crystals.
@out run_log
@out collection_log
@out rejection_log
"""
def simulate_data_collection(cassette_id, sample_score_cutoff, data_redundancy, calibration_image_file):
"""
@begin initialize_run @desc Create run directory and initialize log files.
@out run_log @uri file:run/run_log.txt
"""
if not os.path.exists('run'):
os.makedirs('run')
for filepath in ['run/run_log.txt', 'run/collected_images.csv', 'run/rejected_samples.txt']:
if os.path.exists(filepath):
os.remove(filepath)
with run_logger(log_file_name="run/run_log.txt") as run_log:
run_log.write("Processing samples in cassette " + cassette_id)
run_log.write("Sample quality cutoff: " + str(sample_score_cutoff))
"""
@end initialize_run
"""
"""
@begin load_screening_results @desc Load sample information from spreadsheet.
@param cassette_id
@in sample_spreadsheet_file @as sample_spreadsheet @uri file:cassette_{cassette_id}_spreadsheet.csv
@out sample_name @out sample_quality
"""
sample_spreadsheet_file = 'cassette_{0}_spreadsheet.csv'.format(cassette_id)
for sample_name, sample_quality in spreadsheet_rows(sample_spreadsheet_file):
run_log.write("Sample {0} had score of {1}".format(sample_name, sample_quality))
"""
@end load_screening_results
"""
"""
@begin calculate_strategy @desc Reject unsuitable crystals and compute \n best data sets to collect for accepted crystals.
@param sample_score_cutoff @param data_redundancy @param sample_name @param sample_quality
@out accepted_sample @out rejected_sample @out num_images @out energies
"""
accepted_sample, rejected_sample, num_images, energies = calculate_strategy(sample_name, sample_quality, sample_score_cutoff, data_redundancy)
"""
@end calculate_strategy
"""
"""
@begin log_rejected_sample @desc Record which samples were rejected.
@param cassette_id @param rejected_sample
@out rejection_log @uri file:run/rejected_samples.txt
"""
if (rejected_sample is not None):
run_log.write("Rejected sample {0}".format(rejected_sample))
with open('run/rejected_samples.txt', 'at') as rejection_log:
rejection_log.write("Rejected sample {0} in cassette {1}\n".format(rejected_sample, cassette_id))
continue
"""
@end log_rejected_sample
"""
"""
@begin collect_data_set @desc Collect data set using the given data collection parameters.
@param cassette_id @param num_images @param accepted_sample @param energies
@out sample_id @desc The crystal that the diffraction image was collected from.
@out energy @desc Energy (in eV) at which the diffraction image was collected.
@out frame_number @desc Index of diffraction image within data set.
@out raw_image_file @AS raw_image @desc Path of file storing the raw diffraction image.
@uri file:run/raw/{cassette_id}/{sample_id}/e{energy}/image_{frame_number}.raw
"""
run_log.write("Collecting data set for sample {0}".format(accepted_sample))
sample_id = accepted_sample
for energy, frame_number, intensity, raw_image_file in collect_next_image(cassette_id, sample_id, num_images, energies, 'run/raw/{cassette_id}/{sample_id}/e{energy}/image_{frame_number:03d}.raw'):
run_log.write("Collecting image {0}".format(raw_image_file))
"""
@end collect_data_set
"""
"""
@begin transform_images @desc Correct raw image using the detector calibration image.
@param sample_id @param energy @param frame_number
@in raw_image_file @AS raw_image
@in calibration_image_file @AS calibration_image @uri file:calibration.img
@out corrected_image_file @AS corrected_image @uri file:run/data/{sample_id}/{sample_id}_{energy}eV_{frame_number}.img
@out total_intensity @out pixel_count
"""
(total_intensity, pixel_count, corrected_image_file) = transform_image(raw_image_file, 'run/data/{0}/{0}_{1}eV_{2:03d}.img'.format(sample_id, energy, frame_number), calibration_image_file)
run_log.write("Wrote transformed image {0}".format(corrected_image_file))
"""
@end transform_images
"""
"""
@begin log_average_image_intensity @desc Record statistics about each diffraction image.
@param cassette_id @param sample_id @param frame_number @param total_intensity @param pixel_count
@in corrected_image_file @AS corrected_image
@out collection_log @uri file:run/collected_images.csv
"""
average_intensity = total_intensity / pixel_count
with open('run/collected_images.csv', 'at') as collection_log_file:
collection_log = csv.writer(collection_log_file, lineterminator=os.linesep)
collection_log.writerow([cassette_id, sample_id, energy, average_intensity, corrected_image_file])
"""
@end log_average_image_intensity
"""
"""
@end simulate_data_collection
"""
def calculate_strategy(sample_name, sample_quality, sample_score_cutoff, data_redundancy):
if sample_quality >= sample_score_cutoff:
accepted_sample = sample_name
rejected_sample = None
num_images = int(sample_quality * data_redundancy) + 2
num_energies = 1 + int(sample_quality/sample_score_cutoff) if sample_score_cutoff > 0 else 5
energies = [10000, 11000, 12000, 13000, 14000][0:num_energies-1]
else:
accepted_sample = None
rejected_sample = sample_name
num_images = 0
energies = []
return accepted_sample, rejected_sample, num_images, energies
def collect_next_image(cassette_id, sample_id, num_images, energies, image_path_template):
for energy in energies:
for frame_number in range(1, num_images + 1):
raw_image_path = image_path_template.format(cassette_id=cassette_id, sample_id=sample_id, energy=energy, frame_number=frame_number)
with new_image_file(raw_image_path) as raw_image:
intensity = int(math.floor(math.floor(energy / (frame_number + 1)) % math.sqrt(energy)))
raw_image.write_values(10 * [intensity])
yield energy, frame_number, intensity, raw_image_path
def transform_image(raw_image_path, corrected_image_path, calibration_image_path):
with open(raw_image_path, 'rt') as raw_image, open(calibration_image_path, 'rt') as calibration_image, new_image_file(corrected_image_path) as corrected_image:
pixel_count = 0
total_intensity = 0
for line in raw_image:
raw_value = int(line)
correction = int(calibration_image.readline())
adjusted_value = raw_value - correction
corrected_value = adjusted_value if adjusted_value >= 0 else 0
corrected_image.write(corrected_value)
total_intensity += corrected_value
pixel_count += 1
return total_intensity, pixel_count, corrected_image_path
def spreadsheet_rows(spreadsheet_file_name):
with open(spreadsheet_file_name, 'rt') as screening_results:
sample_results = csv.DictReader(screening_results)
for sample in sample_results:
yield sample['id'], int(sample['score'])
class run_logger:
def __init__(self, terminal=sys.stdout, log_file_name=None):
self.log_file = open(log_file_name, 'wt') if log_file_name is not None else None
self.terminal = terminal
def __enter__(self):
return self
def write(self, message):
current_time = time.time()
timestamp = datetime.fromtimestamp(current_time).strftime('%Y-%m-%d %H:%M:%S')
log_message = "{0} {1}\n".format(timestamp, message)
for log in (self.log_file, self.terminal):
if (log is not None):
log.write(log_message)
def __exit__(self, type, value, traceback):
if self.log_file is not None:
self.log_file.close()
class new_image_file:
def __init__(self, image_path):
image_dir = os.path.dirname(image_path)
if not os.path.isdir(image_dir):
os.makedirs(image_dir)
self.image_file = open(image_path, 'wt')
def __enter__(self):
return self
def write(self, value):
self.image_file.write(str(value))
self.image_file.write('\n')
def write_values(self, values):
for value in values:
self.write(value)
def name(self):
return self.image_file.name
def __exit__(self, type, value, traceback):
self.image_file.close()
if __name__ == '__main__':
# define command line options
parser = optparse.OptionParser()
cassette_id = None
parser.add_option("-o", "--cutoff",
type='float',
dest="sample_score_cutoff",
help="Minimum quality score required of crystals (default=0)",
default=0)
parser.add_option("-r", "--redundancy",
type='float',
dest="data_redundancy",
help='The desired redundancy of the data sets collected (default=1)',
default=1)
parser.add_option("-c", "--calibration",
type='string',
dest="calibration_file",
help='Calibration file for transforming raw images (default=calibration.img)',
default='calibration.img')
parser.set_usage("python simulate_data_collection.py <cassette_id> [options]")
# parse command line options
(options, args) = parser.parse_args()
# validate options
if len(args) != 1:
print("\n***** ERROR: Required argument cassette_id was not provided *****\n")
parser.print_help()
exit()
# run the simulation using the provided options
simulate_data_collection(args[0], options.sample_score_cutoff, options.data_redundancy, options.calibration_file)
|
idaks/DataONE-Prov-Summer-2017
|
examples/simulate_data_collection/source-scripts/simulate_data_collection.py
|
Python
|
apache-2.0
| 11,420
|
[
"CRYSTAL"
] |
b99a554073c9896d5cc25f98f3402d06a00a33517930b479af6f93ef8b898cab
|
"""This module provides several built-in models for incoherent
neutron scattering data fitting.
These functions generate a `Model` class instance from
the **lmfit** package [#]_.
References
----------
.. [#] https://lmfit.github.io/lmfit-py/
"""
import operator
import numpy as np
try:
from lmfit import CompositeModel, Model
from lmfit.models import COMMON_GUESS_DOC
except ImportError:
print(
"The lmfit package cannot be found, please install it to use "
"the interface with nPDyn."
)
class Model:
def __init__(self, tmp, **kwargs):
pass
class CompositeModel:
def __init__(self, tmp, **kwargs):
pass
COMMON_GUESS_DOC = ""
from nPDyn.lmfit.lmfit_presets import (
delta,
gaussian,
lorentzian,
jump_diff,
rotations,
voigt,
pseudo_voigt,
kww,
two_diff_state,
hline,
build_2D_model,
)
from nPDyn.lmfit.convolutions import getGlobals
# -------------------------------------------------------
# Helper functions for the models
# -------------------------------------------------------
def guess_from_qens(pars, pGlobals, data, x, q, prefix=None):
"""Estimate starting values from 2D peak data and create Parameters.
Notes
-----
The dataset should be of shape (number of q-values, energies),
that is, the function should be called for each value of
'observable'.
"""
if prefix is None:
prefix = ""
# guess starting values
for qId in range(q.size):
center = x[np.argmax(data[qId])]
amplitude = np.max(data[qId]) * np.pi
b = (np.mean(data[qId, :25]) + np.mean(data[qId, -25:])) / 2
halfMax = (np.max(data[qId]) - np.min(data[qId])) / 2
inter = list((data[qId] - halfMax) ** 2)
id1 = np.argmin(inter)
inter.pop(id1)
id2 = np.argmin(inter)
sigma = abs(x[id1] - x[id2])
sigma = sigma if sigma != 0.0 else 1.0 # to avoid division by zero
for name, par in zip(
("amplitude", "center", "sigma", "b"),
(amplitude, center, sigma, b),
):
if par in pGlobals:
name = prefix + name
else:
name = "%s%s_%i" % (prefix, name, qId)
if name in pars.keys():
pars[name].set(value=par)
return pars
def update_param_vals(pars, prefix, **kwargs):
"""Update parameter values with keyword arguments."""
for key, val in kwargs.items():
pname = "%s%s" % (prefix, key)
if pname in pars:
pars[pname].value = val
pars.update_constraints()
return pars
# -------------------------------------------------------
# Built-in models
# -------------------------------------------------------
class ModelPVoigtBkgd(CompositeModel):
"""A pseudo-voigt profile with a background term.
Parameters
----------
q : np.array or list
Array of momentum transfer q-values to be used.
kwargs : dict
Additional keyword arguments to pass to :func:`build_2D_model`
"""
def __init__(self, q, **kwargs):
prefix = ""
if "prefix" in kwargs.keys():
prefix = kwargs.pop("prefix")
left = pseudo_voigt(q, prefix="%spv_" % prefix, **kwargs)
right = hline(q, prefix="%sbkgd_" % prefix, **kwargs)
super().__init__(left, right, operator.add)
def guess(self, data, x=None, q=None, **kwargs):
"""Estimate initial model parameter values from data."""
pars = self.make_params()
# guess parameters
for comp in self.components:
pGlobals = getGlobals(comp.make_funcargs(params=pars))
pars.update(
guess_from_qens(pars, pGlobals, data, x, q, prefix=comp.prefix)
)
return update_param_vals(pars, self.prefix, **kwargs)
guess.__doc__ = COMMON_GUESS_DOC
class ModelGaussBkgd(CompositeModel):
"""A Gaussian with a background term.
Can be useful for empty can signal.
Parameters
----------
q : np.array or list
Array of momentum transfer q-values to be used.
kwargs : dict
Additional keyword arguments to pass to :func:`build_2D_model`
"""
def __init__(self, q, **kwargs):
prefix = ""
if "prefix" in kwargs.keys():
prefix = kwargs.pop("prefix")
left = gaussian(q, prefix="%sgauss_" % prefix, **kwargs)
right = hline(q, prefix="%sbkgd_" % prefix, **kwargs)
super().__init__(left, right, operator.add)
def guess(self, data, x=None, q=None, **kwargs):
"""Estimate initial model parameter values from data."""
pars = self.make_params()
# guess parameters
for comp in self.components:
pGlobals = getGlobals(comp.make_funcargs(params=pars))
pars.update(
guess_from_qens(pars, pGlobals, data, x, q, prefix=comp.prefix)
)
return update_param_vals(pars, self.prefix, **kwargs)
guess.__doc__ = COMMON_GUESS_DOC
class ModelDeltaLorentzians(CompositeModel):
"""A Dirac delta with a given number of Lorentzians.
Parameters
----------
q : np.array or list
Array of momentum transfer q-values to be used.
nLor : int, optional
Number of Lorentzians to be included in the model.
kwargs : dict
Additional keyword arguments to pass to :func:`build_2D_model`
"""
def __init__(self, q, nLor=2, **kwargs):
prefix = ""
if "prefix" in kwargs.keys():
prefix = kwargs.pop("prefix")
left = delta(q, prefix="%sdelta_" % prefix, **kwargs)
right = lorentzian(q, prefix="%sl0_" % prefix, **kwargs)
for i in range(1, nLor):
right = CompositeModel(
right,
lorentzian(q, prefix="%sl%i_" % (prefix, i), **kwargs),
operator.add,
)
super().__init__(left, right, operator.add)
def guess(self, data, x=None, q=None, **kwargs):
"""Estimate initial model parameter values from data."""
pars = self.make_params()
# guess parameters
for comp in self.components:
pGlobals = getGlobals(comp.make_funcargs(params=pars))
pars.update(
guess_from_qens(pars, pGlobals, data, x, q, prefix=comp.prefix)
)
return update_param_vals(pars, self.prefix, **kwargs)
guess.__doc__ = COMMON_GUESS_DOC
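# -------------------------------------------------------
# Illustrative usage sketch (an assumption, not part of the documented
# nPDyn API): build a Gaussian-plus-background model for three q-values
# and guess starting parameters from synthetic data of shape (q, energy).
if __name__ == "__main__":
    q = np.array([0.4, 0.8, 1.2])
    x = np.linspace(-10.0, 10.0, 400)
    data = np.exp(-x ** 2 / 2.0)[None, :] * np.ones((q.size, 1))
    model = ModelGaussBkgd(q)
    params = model.guess(data, x=x, q=q)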
|
kpounot/nPDyn
|
nPDyn/lmfit/lmfit_builtins.py
|
Python
|
gpl-3.0
| 6,538
|
[
"DIRAC",
"Gaussian"
] |
0cc57e3ecaead9efd39ebe224f52754aa9596f395e64a5eb4fef97b362dbb060
|
# coding: utf-8
# # ACS Lab 01 - Scientific Visualization
#
# The first lab of the semester will be structured differently than the rest. The focus of this lab is to familiarize yourself with a wide variety of scientific visualizations. All of the labs this semester will be language agnostic, meaning you can use your language of choice, but they often require a higher level language such as python, matlab or R.
#
# All of my solutions will be written in python. I highly recommend installing python via the Anaconda distribution ([Installation Instructions](https://conda.io/docs/install/full.html)). If you install this way you will have all of the packages I used to complete this assignment.
#
# For this lab you will be asked to complete any 10 of the following 11 visualization tasks.
#
# Remember to label your axes when appropriate and when plotting multiple results on a single plot remember to include a legend.
# In[1]:
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import sys
print('Python version: {}'.format(sys.version[:5]))
print('Matplotlib version: {}'.format(mpl.__version__))
print('Numpy version: {}'.format(np.__version__))
print('Scipy version: {}'.format(scipy.__version__))
print('Pandas version: {}'.format(pd.__version__))
# ## LogLog Convergence
#
# Often when running numerical simulations, researchers are interested in how the relative error changes as the resolution of the model is adjusted. A [rate of convergence](https://en.wikipedia.org/wiki/Rate_of_convergence) can be calculated numerically, but it is also often visualized on a loglog plot. The file `convergence.csv` contains the errors associated with a Forward Euler method, a Trapezoidal method, and the spatial resolution used. Plot dx vs error for both runs on a loglog scale. Include a dashed line with the exact slope of $dx^1$ and $dx^2$ to help guide the eye. The y-intercept of the guide lines should be adjusted so that they do not overlap the data.
#
# Hints: An easy way to read in tabular data is using the pandas function `pd.read_csv()`. Use `plt.loglog()` to create a loglog plot. The equation for a straight line in log space is $y = bx^m$ where $m$ is the slope and $b$ is the y intercept.
# **Solution**
# In[ ]:
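# A minimal sketch, not the graded solution: the column names 'dx', 'euler'
# and 'trapezoidal' are assumptions about convergence.csv; rename to match the file.
df = pd.read_csv('convergence.csv')
dx = df['dx'].values
plt.loglog(dx, df['euler'], 'o-', label='Forward Euler')
plt.loglog(dx, df['trapezoidal'], 's-', label='Trapezoidal')
# Guide lines y = b*dx**m; b is scaled down so the lines sit below the data.
b1 = 0.5 * df['euler'].iloc[-1] / dx[-1]
b2 = 0.5 * df['trapezoidal'].iloc[-1] / dx[-1] ** 2
plt.loglog(dx, b1 * dx, 'k--', label=r'$\propto dx^1$')
plt.loglog(dx, b2 * dx ** 2, 'k:', label=r'$\propto dx^2$')
plt.xlabel('dx')
plt.ylabel('relative error')
plt.legend()
plt.show()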
# ## Iris by Species
#
# Many experiments require the same measurements to be taken on different labeled subjects. A famous example of labeled data is known as the [Iris flower data set](https://en.wikipedia.org/wiki/Iris_flower_data_set). In 1936 Ronald Fisher studied the length and the width of the sepals and petals of three species of iris (Iris setosa, Iris virginica and Iris versicolor). The file `iris.csv` contains this dataset. Plot the petal length vs petal width and color the points based on their species label.
#
# Hints: Look into the pandas function `groupby()`
# **Solution**
# In[ ]:
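# Sketch solution; 'petal_length', 'petal_width' and 'species' are assumed
# column names for iris.csv -- adjust to the actual header.
iris = pd.read_csv('iris.csv')
fig, ax = plt.subplots()
for species, group in iris.groupby('species'):
    ax.scatter(group['petal_length'], group['petal_width'], label=species)
ax.set_xlabel('petal length')
ax.set_ylabel('petal width')
ax.legend()
plt.show()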
# ## Anscombe Subplot
#
# A common task in scientific visualization is creating figures with multiple subplots. A classic dataset to promote the importance of visualization is known as the [Anscombe's quartet](https://en.wikipedia.org/wiki/Anscombe%27s_quartet). All four of the data sets have the same mean, standard deviation, and linear regression fit. If you don't believe me check it out yourself! The file `anscombe.csv` contains the dataset. Make a single figure with 4 subplots of x1 vs y1, x2 vs y2, ... etc for all xy pairs.
#
# Hints: Look into the `subplots()` function. Use `plt.tight_layout()` to make sure the axis labels don't overlap.
# **Solution**
# In[ ]:
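# Sketch; assumes anscombe.csv has columns x1, y1, ..., x4, y4.
ans = pd.read_csv('anscombe.csv')
fig, axes = plt.subplots(2, 2, figsize=(8, 6))
for i, ax in enumerate(axes.flat, start=1):
    ax.scatter(ans['x%d' % i], ans['y%d' % i])
    ax.set_title('Dataset %d' % i)
    ax.set_xlabel('x%d' % i)
    ax.set_ylabel('y%d' % i)
plt.tight_layout()
plt.show()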
# ## Gaussian Overlap
#
# In statistics we are often interested in the overlap of two statistical distributions. Plot two [normal distributions](https://en.wikipedia.org/wiki/Normal_distribution) $\mathcal{N}_1(\mu=-1, \sigma=1)$ and $\mathcal{N}_2(\mu=1, \sigma=1)$. Shade in the region where they overlap. Note their intersection is at $x=0$.
#
# Hints: You can use `from matplotlib.mlab import normpdf` for an easy way to calculate the normal distribution. Look up `plt.fill_between` for the shading.
# **Solution**
# In[ ]:
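# Sketch using scipy.stats.norm instead of matplotlib.mlab.normpdf, which is
# deprecated/removed in recent Matplotlib releases.
from scipy.stats import norm
x = np.linspace(-5, 5, 500)
y1 = norm.pdf(x, loc=-1, scale=1)
y2 = norm.pdf(x, loc=1, scale=1)
plt.plot(x, y1, label=r'$\mathcal{N}_1(\mu=-1, \sigma=1)$')
plt.plot(x, y2, label=r'$\mathcal{N}_2(\mu=1, \sigma=1)$')
# The overlap region is bounded above by the lower envelope of the two pdfs.
plt.fill_between(x, np.minimum(y1, y2), color='gray', alpha=0.5)
plt.xlabel('x')
plt.ylabel('probability density')
plt.legend()
plt.show()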
# ## Double Y-Axis
#
# Sometimes it is useful to see how two different quantities vary with respect to a common variable. [Spurious Correlations](http://www.tylervigen.com/spurious-correlations) is a famous website for emphasizing that correlation is not the same as causation. The file `div_mar.csv` contains the number of divorces in Maine per 1000 people and the per capita consumption of margarine measured in pounds for each year from 2000 to 2009. Plot both of these variables with respect to the year in a single figure with a different y axis for the right and left side of the figure. If you would like, also numerically calculate the correlation.
#
# Hints: Look up the `ax.twinx()` function. Use `np.corrcoef()` for the correlation calculation
# **Solution**
# In[ ]:
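# Sketch; 'year', 'divorces' and 'margarine' are assumed column names in div_mar.csv.
dm = pd.read_csv('div_mar.csv')
fig, ax1 = plt.subplots()
ax1.plot(dm['year'], dm['divorces'], 'b-o')
ax1.set_xlabel('year')
ax1.set_ylabel('divorces in Maine per 1000 people', color='b')
ax2 = ax1.twinx()
ax2.plot(dm['year'], dm['margarine'], 'r-s')
ax2.set_ylabel('per capita margarine consumption (lbs)', color='r')
print('correlation: %.3f' % np.corrcoef(dm['divorces'], dm['margarine'])[0, 1])
plt.show()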
# ## Brain Slice
#
# Data can come in many dimensions. One technique for looking into internal structures of 3D data is to plot a 2D slice along a fixed axis. The file `brain.csv` contains the intensity data of an average brain taken from MRI scans of 152 healthy individuals from the [Montreal Neurological Institute](http://www.mcgill.ca/neuro/neuro-brain-research-patient-care-and-training). The data has been saved as a 1D array of length (902629) and must be reshaped into a (91, 109, 91) array before plotting. Make three images of the x = 47, y = 40, and z = 50 planes.
#
# Hints: Use `plt.imshow()` for visualization and `cmap='gray'` to make the image gray scale. Read the data in using `np.loadtxt()` and use `np.reshape()` to make the data the correct shape.
# **Solution**
# In[ ]:
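# Sketch; the (91, 109, 91) shape comes from the prompt. The transpose and
# origin settings are cosmetic orientation choices.
brain = np.loadtxt('brain.csv').reshape((91, 109, 91))
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
slices = [brain[47, :, :], brain[:, 40, :], brain[:, :, 50]]
titles = ['x = 47', 'y = 40', 'z = 50']
for ax, plane, title in zip(axes, slices, titles):
    ax.imshow(plane.T, cmap='gray', origin='lower')
    ax.set_title(title)
    ax.axis('off')
plt.tight_layout()
plt.show()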
# ## ODE Streamline
#
# When presented with a set of ordinary differential equations one can explore the [phase portrait](https://en.wikipedia.org/wiki/Phase_portrait) to find equilibrium points. Use the following ODE to create a streamline plot and approximately determine the equilibrium points.
#
# \begin{align*}
# \frac{dx}{dt} &= xy + 3y \\
# \frac{dy}{dt} &= xy - 3x
# \end{align*}
#
# Hints: Look up `ax.streamplot()`.
# **Solution**
# In[ ]:
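# Sketch: evaluate the vector field on a grid and let streamplot trace it.
Y, X = np.mgrid[-10:10:200j, -10:10:200j]
U = X * Y + 3 * Y
V = X * Y - 3 * X
fig, ax = plt.subplots()
ax.streamplot(X, Y, U, V, density=1.5)
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.show()
# dx/dt = y(x + 3) and dy/dt = x(y - 3), so the equilibria are (0, 0) and (-3, 3).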
# ## Volcano Contour
#
# One way to visualize a 3D surface is with the help of contour maps. The file `volcano.csv` contains topographic information for Auckland's [Maunga Whau Volcano](https://en.wikipedia.org/wiki/Maungawhau) on a 870m by 610m grid with height measurements taken every 10m. Create a contour map, including labeled contour lines.
#
# Hints: Use `plt.contourf()` and `plt.contour()` for the plotting and `plt.clabel()` for the contour labels. Use `figsize = (6.1, 8.7)` for the dimensions of the figure to avoid warping.
# **Solution**
# In[ ]:
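# Sketch; assumes volcano.csv is a bare 87 x 61 grid of heights with no header row.
volcano = np.loadtxt('volcano.csv', delimiter=',')
plt.figure(figsize=(6.1, 8.7))
plt.contourf(volcano, cmap='terrain')
cs = plt.contour(volcano, colors='black')
plt.clabel(cs, inline=True, fmt='%d')
plt.xlabel('east-west grid index (10 m spacing)')
plt.ylabel('north-south grid index (10 m spacing)')
plt.show()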
# ## 3D Gamma Function
#
# In complex analysis, people study how a function changes on the complex plane. A pole of the function $f(z)$ at point $a$ is defined by the function approaching infinity as $z$ approaches $a$. Make a 3D surface plot of the absolute value of the [gamma function](https://en.wikipedia.org/wiki/Gamma_function) on the complex plane. Notice the poles along the real axis.
#
# Hints: Use `from scipy.special import gamma` for the gamma function. Create a real mesh using `np.meshgrid()` then call the gamma function using `(X + Y*1j)` to make it complex. Adjust the resolution of the meshgrid if needed.
# **Solution**
# In[ ]:
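# Sketch; |Gamma(z)| is clipped so the poles at z = 0, -1, -2, ... don't blow
# up the z-axis. scipy.special.gamma accepts complex arguments.
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, registers the 3d projection
from scipy.special import gamma
X, Y = np.meshgrid(np.linspace(-4.5, 4.5, 301), np.linspace(-2.0, 2.0, 301))
Z = np.clip(np.abs(gamma(X + Y * 1j)), 0, 6)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, cmap='viridis', rstride=3, cstride=3)
ax.set_xlabel('Re(z)')
ax.set_ylabel('Im(z)')
ax.set_zlabel(r'$|\Gamma(z)|$')
plt.show()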
# ## Great Circles
#
# When creating a world map you have to project a 3D object into a 2D plane. Different projections produce distortions on the resulting map. The shortest path between two points in Euclidean space is a straight line, but when dealing with curved surfaces, such as the earth, the shortest path becomes more complicated. On a sphere the shortest path between two points is called the [Great Circle](https://en.wikipedia.org/wiki/Great_circle). Create a world map using the [Robinson projection](https://en.wikipedia.org/wiki/Robinson_projection) and draw the great circle connecting Tallahassee, Florida (30.445062,-84.299628) to Berlin, Germany (52.518059, 13.405331), then from Berlin to Johannesburg, South Africa (-26.201209, 28.046225) and finally from Johannesburg to Sydney, Australia (-33.856292, 151.215395).
#
#
# Hints: Look up how to install `Basemap` for python. There is a function called `drawgreatcircle()` which is useful.
# **Solution**
# In[ ]:
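# Sketch; Basemap must be installed separately (e.g. via conda).
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='robin', lon_0=0)
m.drawcoastlines()
m.fillcontinents(color='lightgray')
# (lat, lon) for Tallahassee, Berlin, Johannesburg, Sydney.
stops = [(30.445062, -84.299628), (52.518059, 13.405331),
         (-26.201209, 28.046225), (-33.856292, 151.215395)]
for (lat1, lon1), (lat2, lon2) in zip(stops[:-1], stops[1:]):
    m.drawgreatcircle(lon1, lat1, lon2, lat2, linewidth=2, color='r')
plt.title('Great circle routes')
plt.show()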
# ## Cycloid Animation
#
# The curve traced by a point on the rim of a circular wheel as the wheel rolls along a straight line without slippage is known as a [cycloid](https://en.wikipedia.org/wiki/Cycloid). For a circle of radius $r$ the parametric equations for the cycloid are
#
# \begin{align*}
# x &= r(t - \sin(t)) \\
# y &= r(1 - \cos(t))
# \end{align*}
#
# Make an animation of the line traced out by a cycloid of radius $r=1$ that completes two rotations. Try to include the cycloid point, the line traced out by the point and the circle that creates the cycloid.
#
# Hints: For given $t$, the circle's centre lies at $x = rt$, $y = r$. Use `from matplotlib import animation` for the animation.
# **Solution**
# In[ ]:
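# Sketch animation for r = 1 over two rotations (t in [0, 4*pi]), drawing the
# traced curve, the tracing point and the rolling circle on each frame.
from matplotlib import animation
r = 1.0
t = np.linspace(0, 4 * np.pi, 200)
theta = np.linspace(0, 2 * np.pi, 100)
fig, ax = plt.subplots()
ax.set_xlim(0, 4 * np.pi * r)
ax.set_ylim(0, 2.5 * r)
ax.set_aspect('equal')
trace, = ax.plot([], [], 'b-')
point, = ax.plot([], [], 'ro')
circle, = ax.plot([], [], 'g-')

def animate(i):
    ti = t[:i + 1]
    x = r * (ti - np.sin(ti))
    y = r * (1 - np.cos(ti))
    trace.set_data(x, y)
    point.set_data([x[-1]], [y[-1]])
    # The rolling circle is centred at (r*t, r).
    circle.set_data(r * ti[-1] + r * np.cos(theta), r + r * np.sin(theta))
    return trace, point, circle

anim = animation.FuncAnimation(fig, animate, frames=len(t), interval=30, blit=True)
plt.show()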
|
eitanlees/ISC-5315
|
ACS1_Lab_01/ACS1_Lab_01.py
|
Python
|
mit
| 9,149
|
[
"Gaussian"
] |
268ddad696fdc947899444960049ec472129b54fe976a34327fe63a69c102a80
|
import numpy as np
from mdtraj.testing import eq
from msmbuilder.example_datasets import AlanineDipeptide
from msmbuilder.featurizer import AtomPairsFeaturizer, get_atompair_indices
from msmbuilder.featurizer.subset import SubsetAtomPairs, \
SubsetCosPhiFeaturizer, SubsetCosPsiFeaturizer, SubsetFeatureUnion, \
SubsetSinPhiFeaturizer, SubsetSinPsiFeaturizer
def test_SubsetAtomPairs_1():
trajectories = AlanineDipeptide().get_cached().trajectories
trj0 = trajectories[0][0]
atom_indices, pair_indices = get_atompair_indices(trj0)
featurizer = AtomPairsFeaturizer(pair_indices)
X_all0 = featurizer.transform(trajectories)
featurizer = SubsetAtomPairs(pair_indices, trj0)
featurizer.subset = np.arange(len(pair_indices))
X_all = featurizer.transform(trajectories)
any([eq(x, x0) for (x, x0) in zip(X_all, X_all0)])
def test_SubsetAtomPairs_2():
trajectories = AlanineDipeptide().get_cached().trajectories
trj0 = trajectories[0][0]
atom_indices, pair_indices = get_atompair_indices(trj0)
featurizer = AtomPairsFeaturizer(pair_indices)
X_all0 = featurizer.transform(trajectories)
featurizer = SubsetAtomPairs(pair_indices, trj0,
subset=np.arange(len(pair_indices)))
X_all = featurizer.transform(trajectories)
any([eq(x, x0) for (x, x0) in zip(X_all, X_all0)])
def test_SubsetAtomPairs_3():
trajectories = AlanineDipeptide().get_cached().trajectories
trj0 = trajectories[0][0]
atom_indices, pair_indices = get_atompair_indices(trj0)
featurizer = AtomPairsFeaturizer(pair_indices)
X_all0 = featurizer.transform(trajectories)
featurizer = SubsetAtomPairs(pair_indices, trj0, subset=np.array([0, 1]))
X_all = featurizer.transform(trajectories)
try:
any([eq(x, x0) for (x, x0) in zip(X_all, X_all0)])
except AssertionError:
pass
else:
raise AssertionError("Did not raise an assertion!")
def test_that_all_featurizers_run():
trajectories = AlanineDipeptide().get_cached().trajectories
trj0 = trajectories[0][0]
atom_indices, pair_indices = get_atompair_indices(trj0)
atom_featurizer0 = SubsetAtomPairs(pair_indices, trj0, exponent=-1.0)
cosphi = SubsetCosPhiFeaturizer(trj0)
sinphi = SubsetSinPhiFeaturizer(trj0)
cospsi = SubsetCosPsiFeaturizer(trj0)
sinpsi = SubsetSinPsiFeaturizer(trj0)
featurizer = SubsetFeatureUnion([
("pairs", atom_featurizer0),
("cosphi", cosphi),
("sinphi", sinphi),
("cospsi", cospsi),
("sinpsi", sinpsi)
])
featurizer.subsets = [np.arange(1) for i in range(featurizer.n_featurizers)]
X_all = featurizer.transform(trajectories)
eq(X_all[0].shape[1], 1 * featurizer.n_featurizers)
|
dr-nate/msmbuilder
|
msmbuilder/tests/test_featurizer_subset.py
|
Python
|
lgpl-2.1
| 2,780
|
[
"MDTraj"
] |
0edee68e8ba8e6894b70a107555b70c45b1b5ce81071da0c1c7a776776ea530d
|
import calendar
import datetime
import logging
import uuid
from celery.result import AsyncResult
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from .fields import BoundingBoxField, RasterRendererField
from .utils import auto_memoize
logger = logging.getLogger(__name__)
SERVICE_DATA_ROOT = getattr(settings, 'NC_SERVICE_DATA_ROOT', '/var/ncdjango/services/')
TEMPORARY_FILE_LOCATION = getattr(settings, 'NC_TEMPORARY_FILE_LOCATION', 'temp')
USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class Service(models.Model):
"""
A service maps to a single NetCDF dataset. Services contain general metadata (name, description), and information
about the data extend, projection, and support for time.
"""
CALENDAR_CHOICES = (
('standard', 'Standard Gregorian'),
('noleap', 'Standard, no leap years'),
('360', '360-day years')
)
TIME_UNITS_CHOICES = (
('milliseconds', 'Milliseconds'),
('seconds', 'Seconds'),
('minutes', 'Minutes'),
('hours', 'Hours'),
('days', 'Days'),
('weeks', 'Weeks'),
('months', 'Months'),
('years', 'Years'),
('decades', 'Decades'),
('centuries', 'Centuries')
)
name = models.CharField(max_length=256, db_index=True, unique=True)
description = models.TextField(null=True)
data_path = models.FilePathField(path=SERVICE_DATA_ROOT, recursive=True, max_length=1024)
projection = models.TextField() # PROJ4 definition
full_extent = BoundingBoxField()
initial_extent = BoundingBoxField()
supports_time = models.BooleanField(default=False)
time_start = models.DateTimeField(null=True)
time_end = models.DateTimeField(null=True)
time_interval = models.PositiveIntegerField(null=True)
time_interval_units = models.CharField(max_length=15, choices=TIME_UNITS_CHOICES, null=True)
calendar = models.CharField(max_length=10, choices=CALENDAR_CHOICES, null=True)
render_top_layer_only = models.BooleanField(default=True)
def save(self, *args, **kwargs):
has_required_time_fields = (
self.time_start and self.time_end and self.time_interval and self.time_interval_units and self.calendar
)
if self.supports_time and not has_required_time_fields:
raise ValidationError("Service supports time but is missing one or more time-related fields")
return super(Service, self).save(*args, **kwargs)
class Variable(models.Model):
"""
A variable in a map service. This is usually presented as a layer in a web interface. Each service may have one
or more variables. Each variable maps to a variable in the NetCDF dataset.
"""
service = models.ForeignKey(Service, on_delete=models.CASCADE)
index = models.PositiveIntegerField()
variable = models.CharField(max_length=256)
projection = models.TextField() # PROJ4 definition
x_dimension = models.CharField(max_length=256)
y_dimension = models.CharField(max_length=256)
name = models.CharField(max_length=256, db_index=True)
description = models.TextField(null=True)
renderer = RasterRendererField()
full_extent = BoundingBoxField()
supports_time = models.BooleanField(default=False)
time_dimension = models.CharField(max_length=256, null=True)
time_start = models.DateTimeField(null=True)
time_end = models.DateTimeField(null=True)
time_steps = models.PositiveIntegerField(null=True)
class Meta:
unique_together = ('variable', 'service')
@property
@auto_memoize
def time_stops(self):
""" Valid time steps for this service as a list of datetime objects. """
if not self.supports_time:
return []
if self.service.calendar == 'standard':
units = self.service.time_interval_units
interval = self.service.time_interval
steps = [self.time_start]
if units in ('years', 'decades', 'centuries'):
if units == 'years':
years = interval
elif units == 'decades':
years = 10 * interval
else:
years = 100 * interval
next_value = lambda x: x.replace(year=x.year + years)
elif units == 'months':
def _fn(x):
year = x.year + (x.month + interval - 1) // 12
month = (x.month + interval) % 12 or 12
day = min(x.day, calendar.monthrange(year, month)[1])
return x.replace(year=year, month=month, day=day)
next_value = _fn
else:
if units == 'milliseconds':
delta = datetime.timedelta(milliseconds=interval)
elif units == 'seconds':
delta = datetime.timedelta(seconds=interval)
elif units == 'minutes':
delta = datetime.timedelta(minutes=interval)
elif units == 'hours':
delta = datetime.timedelta(hours=interval)
elif units == 'days':
delta = datetime.timedelta(days=interval)
elif units == 'weeks':
delta = datetime.timedelta(weeks=interval)
else:
raise ValidationError(
"Service has an invalid time_interval_units: {}".format(self.service.time_interval_units)
)
next_value = lambda x: x + delta
while steps[-1] < self.time_end:
value = next_value(steps[-1])
if value > self.time_end:
break
steps.append(value)
return steps
else:
# TODO
raise NotImplementedError
def save(self, *args, **kwargs):
has_required_time_fields = (self.time_dimension and self.time_start and self.time_end)
if self.supports_time and not has_required_time_fields:
raise ValidationError("Variable supports time but is missing one or more time-related fields")
return super(Variable, self).save(*args, **kwargs)
class TemporaryFile(models.Model):
"""A temporary file upload"""
uuid = models.CharField(max_length=36, default=uuid.uuid4)
date = models.DateTimeField(auto_now_add=True)
filename = models.CharField(max_length=100)
filesize = models.BigIntegerField()
file = models.FileField(upload_to=TEMPORARY_FILE_LOCATION, max_length=1024)
@property
def extension(self):
if self.filename.find(".") != -1:
return self.filename[self.filename.rfind(".") + 1:]
else:
return ""
def temporary_file_deleted(sender, instance, **kwargs):
if instance.file.name:
try:
instance.file.delete(save=False)
except IOError:
logger.exception("Error deleting temporary file: %s" % instance.file.name)
models.signals.post_delete.connect(temporary_file_deleted, sender=TemporaryFile)
class ProcessingJob(models.Model):
""" An active, completed, or failed geoprocessing job. """
uuid = models.CharField(max_length=36, default=uuid.uuid4, db_index=True)
job = models.CharField(max_length=100)
user = models.ForeignKey(USER_MODEL, null=True, on_delete=models.SET_NULL)
user_ip = models.CharField(max_length=32)
created = models.DateTimeField(auto_now_add=True)
celery_id = models.CharField(max_length=100)
inputs = models.TextField(null=False, default="{}")
outputs = models.TextField(null=False, default="{}")
@property
def status(self):
""" The status of the celery task for this job. """
return AsyncResult(self.celery_id).status.lower()
class ProcessingResultService(models.Model):
"""
A result service is created from the raster output of a geoprocessing job. This model tracks which services are
automatically generated from job results.
"""
job = models.ForeignKey(ProcessingJob, on_delete=models.CASCADE)
service = models.ForeignKey(Service, on_delete=models.CASCADE)
is_temporary = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
|
consbio/ncdjango
|
ncdjango/models.py
|
Python
|
bsd-3-clause
| 8,338
|
[
"NetCDF"
] |
0f985e49a3e5e78ceae5fd82df01938f885b529f9435c5802e3e1f9c74936dfc
|
# vim: set expandtab shiftwidth=2 softtabstop=2:
# TODO - BundleAPI is the class we have to override to hook this bundle into ChimeraX
from chimerax.core.toolshed import BundleAPI
class _MyAPI(BundleAPI):
@staticmethod
def get_class(class_name):
if class_name == 'ToolUI':
from . import tool
return tool.ToolUI
return None
@staticmethod
def start_tool(session, tool_name, **kw):
from .tool import ToolUI
from chimerax.core import tools
return tools.get_singleton(session, ToolUI, 'Tempy', create=True)
@staticmethod
    def register_command(command_name, logger):
        # The logger argument is supplied by ChimeraX; the register_* helpers
        # in cmd do the actual registration for each command name.
        from . import cmd
        if command_name == "sccc":
            cmd.register_sccc()
        elif command_name == "smoc":
            cmd.register_smoc()
        elif command_name == "nmi":
            cmd.register_nmi()
        elif command_name == "ccc":
            cmd.register_ccc()
        elif command_name == "difmap":
            cmd.register_difmap()
bundle_api = _MyAPI()
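# A minimal sketch (an assumption, not this bundle's actual cmd.py) of what a
# register_* helper typically looks like with the public ChimeraX command API;
# the 'sccc' arguments and sccc_command function here are hypothetical:
#
#   from chimerax.core.commands import register, CmdDesc, StringArg
#
#   def register_sccc(logger=None):
#       desc = CmdDesc(required=[("map_name", StringArg)],
#                      synopsis="compute the SCCC score")
#       register("sccc", desc, sccc_command, logger=logger)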
|
OniDaito/ChimeraXTempy
|
src/__init__.py
|
Python
|
mit
| 1,224
|
[
"ChimeraX"
] |
b8a1f8f502df13a50883fc69b476e4f4a7389988b3bc903505907774cd18bc5d
|
import sys
import git
import pygtk
pygtk.require('2.0')
import gtk
class HelloWorld:
# This is a callback function. The data arguments are ignored
# in this example. More on callbacks below.
def hello(self, widget, data=None):
print "Hello World"
def delete_event(self, widget, event, data=None):
# If you return FALSE in the "delete_event" signal handler,
# GTK will emit the "destroy" signal. Returning TRUE means
# you don't want the window to be destroyed.
# This is useful for popping up 'are you sure you want to quit?'
# type dialogs.
print "delete event occurred"
# Change FALSE to TRUE and the main window will not be destroyed
# with a "delete_event".
return False
def destroy(self, widget, data=None):
print "destroy signal occurred"
gtk.main_quit()
def __init__(self):
# create a new window
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
# When the window is given the "delete_event" signal (this is given
# by the window manager, usually by the "close" option, or on the
# titlebar), we ask it to call the delete_event () function
# as defined above. The data passed to the callback
# function is NULL and is ignored in the callback function.
self.window.connect("delete_event", self.delete_event)
# Here we connect the "destroy" event to a signal handler.
# This event occurs when we call gtk_widget_destroy() on the window,
# or if we return FALSE in the "delete_event" callback.
self.window.connect("destroy", self.destroy)
# Sets the border width of the window.
self.window.set_border_width(10)
# Creates a new button with the label "Hello World".
self.button = gtk.Button("Hello World")
# When the button receives the "clicked" signal, it will call the
# function hello() passing it None as its argument. The hello()
# function is defined above.
self.button.connect("clicked", self.hello, None)
# This will cause the window to be destroyed by calling
# gtk_widget_destroy(window) when "clicked". Again, the destroy
# signal could come from here, or the window manager.
self.button.connect_object("clicked", gtk.Widget.destroy, self.window)
# This packs the button into the window (a GTK container).
self.window.add(self.button)
# The final step is to display this newly created widget.
self.button.show()
# and the window
self.window.show()
def main(self):
# All PyGTK applications must have a gtk.main(). Control ends here
# and waits for an event to occur (like a key press or mouse event).
gtk.main()
print "paste repo URL here"
url = raw_input()
print url
git.Repo.clone_from('https://github.com/DavidAwad/Arduino', 'local/')
with open('local/readme.txt', 'w') as f:
f.write('Hello! Torrent file made by GitTorrent by David Awad, please feel free to visit me at http://davidawad.github.io or at http://spaceshare.me. I make lots of other cool software. Feel free to check out the code to GitTorrent and more at https://github.com/DavidAwad/GitTorrent, the url for this particular project is hosted at '+ url)
hello = HelloWorld()
hello.main()
print 'Success'
|
DavidAwad/GitTorrent
|
app.py
|
Python
|
mit
| 3,405
|
[
"VisIt"
] |
47351da47426efdbecfd21909a14f0454bf58d113f375e40e4865c8da11d9a93
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Python motu client
#
# Motu, a high efficient, robust and Standard compliant Web Server for Geographic
# Data Dissemination.
#
# http://cls-motu.sourceforge.net/
#
# (C) Copyright 2009-2010, by CLS (Collecte Localisation Satellites) -
# http://www.cls.fr - and Contributors
#
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import urllib
import urllib2
import traceback
import platform
import sys
import httplib
import os
import re
import tempfile
import datetime
import shutil
import zipfile
import logging
import logging.config
import ConfigParser
import optparse
import socket
# The minimum required version of the Python interpreter
import shlex
logging.basicConfig()
REQUIRED_VERSION = (2,7)
# error code to use when exiting after exception catch
ERROR_CODE_EXIT=1
# the config file to load from
CFG_FILE = '~/motu-client/motu-client-python.ini'
LOG_CFG_FILE = './etc/log.ini'
# project libraries path
LIBRARIES_PATH = os.path.join(os.path.dirname(__file__), './util')
SECTION = 'Main'
# shared logger
log = None
# shared variables to download
_variables = []
# Manage imports of project libraries
if not os.path.exists(LIBRARIES_PATH):
sys.stderr.write('\nERROR: can not find project libraries path: %s\n\n' % os.path.abspath(LIBRARIES_PATH))
sys.exit(1)
sys.path.append(LIBRARIES_PATH)
# Import project libraries
from aggregator.connectors.motu.util import utils_log
from aggregator.connectors.motu.util import utils_messages
from aggregator.connectors.motu.util import motu_api
def get_client_version():
"""Return the version (as a string) of this client.
The value is automatically set by the maven processing build, so don't
touch it unless you know what you are doing."""
return motu_api.get_client_version()
def get_client_artefact():
"""Return the artifact identifier (as a string) of this client.
The value is automatically set by the maven processing build, so don't
touch it unless you know what you are doing."""
return motu_api.get_client_artefact()
def load_options(options=None):
"""load options to handle"""
_options = None
# create option parser
parser = optparse.OptionParser(version=get_client_artefact() + ' v' + get_client_version())
# create config parser
conf_parser = ConfigParser.SafeConfigParser()
conf_parser.read(os.path.expanduser(CFG_FILE))
# add available options
parser.add_option( '--quiet', '-q',
help = "prevent any output in stdout",
action = 'store_const',
const = logging.WARN,
dest='log_level')
parser.add_option( '--verbose',
help = "print information in stdout",
action='store_const',
const = logging.DEBUG,
dest='log_level')
parser.add_option( '--noisy',
help = "print more information (traces) in stdout",
action='store_const',
const = utils_log.TRACE_LEVEL,
dest='log_level')
parser.add_option( '--user', '-u',
help = "the user name (string)")
parser.add_option( '--pwd', '-p',
help = "the user password (string)")
parser.add_option( '--auth-mode',
default = motu_api.AUTHENTICATION_MODE_CAS,
help = "the authentication mode: '" + motu_api.AUTHENTICATION_MODE_NONE +
"' (for no authentication), '"+ motu_api.AUTHENTICATION_MODE_BASIC +
"' (for basic authentication), or '"+motu_api.AUTHENTICATION_MODE_CAS +
"' (for Central Authentication Service) [default: %default]")
parser.add_option( '--proxy-server',
help = "the proxy server (url)")
parser.add_option( '--proxy-user',
help = "the proxy user (string)")
parser.add_option( '--proxy-pwd',
help = "the proxy password (string)")
parser.add_option( '--motu', '-m',
help = "the motu server to use (url)")
parser.add_option( '--service-id', '-s',
help = "The service identifier (string)")
parser.add_option( '--product-id', '-d',
help = "The product (data set) to download (string)")
parser.add_option( '--date-min', '-t',
help = "The min date with optional hour resolution (string following format YYYY-MM-DD [HH:MM:SS])")
parser.add_option( '--date-max', '-T',
help = "The max date with optional hour resolution (string following format YYYY-MM-DD [HH:MM:SS])",
default = datetime.date.today().isoformat())
parser.add_option( '--latitude-min', '-y',
type = 'float',
help = "The min latitude (float in the interval [-90 ; 90])")
parser.add_option( '--latitude-max', '-Y',
type = 'float',
help = "The max latitude (float in the interval [-90 ; 90])")
parser.add_option( '--longitude-min', '-x',
type = 'float',
help = "The min longitude (float in the interval [-180 ; 180])")
parser.add_option( '--longitude-max', '-X',
type = 'float',
help = "The max longitude (float in the interval [-180 ; 180])")
parser.add_option( '--depth-min', '-z',
type = 'string',
help = "The min depth (float in the interval [0 ; 2e31] or string 'Surface')")
parser.add_option( '--depth-max', '-Z',
type = 'string',
help = "The max depth (float in the interval [0 ; 2e31] or string 'Surface')")
parser.add_option( '--variable', '-v',
help = "The variable (list of strings)",
callback=option_callback_variable,
dest="variable",
type="string",
action="callback")
parser.add_option( '--sync-mode', '-S',
help = "Sets the download mode to synchronous (not recommended)",
action='store_true',
dest='sync')
parser.add_option( '--describe-product', '-D',
help = "Get all updated information on a dataset. Output is in XML format",
action='store_true',
dest='describe')
parser.add_option( '--size',
help = "Get the size of an extraction. Output is in XML format",
action='store_true',
dest='size')
parser.add_option( '--out-dir', '-o',
help = "The output dir where result (download file) is written (string). If it starts with 'console', behaviour is the same as with --console-mode. ",
default=".")
parser.add_option( '--out-name', '-f',
help = "The output file name (string)",
default="data.nc")
parser.add_option( '--block-size',
type = 'int',
help = "The block used to download file (integer expressing bytes)",
default="65536")
parser.add_option( '--socket-timeout',
type = 'float',
help = "Set a timeout on blocking socket operations (float expressing seconds)")
parser.add_option( '--user-agent',
help = "Set the identification string (user-agent) for HTTP requests. By default this value is 'Python-urllib/x.x' (where x.x is the version of the python interpreter)")
parser.add_option( '--outputWritten',
help = "Optional parameter used to set the format of the file returned by the download request: netcdf or netcdf4. If not set, netcdf is used.")
parser.add_option( '--console-mode',
help = "Optional parameter used to display result on stdout, either URL path to download extraction file, or the XML content of getSize or describeProduct requests.",
action='store_true',
dest='console_mode')
# set default values by picking from the configuration file
default_values = {}
default_variables = []
for option in parser.option_list:
if (option.dest != None) and conf_parser.has_option(SECTION, option.dest):
if option.dest == "variable":
default_variables.append(conf_parser.get(SECTION, option.dest))
default_values[option.dest] = default_variables
else:
default_values[option.dest] = conf_parser.get(SECTION, option.dest)
parser.set_defaults( **default_values )
if options is None:
return parser.parse_args()
else:
return parser.parse_args(options)
def option_callback_variable(option, opt, value, parser):
global _variables
_variables.append(value)
setattr(parser.values, option.dest, _variables)
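# Illustrative use (an assumption about the invocation, not from this file):
# each repeated flag appends to the shared list, so
#   --variable temperature --variable salinity
# leaves parser.values.variable == ['temperature', 'salinity'].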
def check_version():
"""Utilitary function that checks the required version of the python interpreter
is available. Raise an exception if not."""
global REQUIRED_VERSION
cur_version = sys.version_info
if (cur_version[0] > REQUIRED_VERSION[0] or
cur_version[0] == REQUIRED_VERSION[0] and
cur_version[1] >= REQUIRED_VERSION[1]):
return
else:
        raise Exception( "This tool requires Python 2.7 or greater. Your version is %s. " % str(cur_version) )
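# Note: since Python compares tuples lexicographically, the check above is
# equivalent to:
#   if sys.version_info[:2] < REQUIRED_VERSION:
#       raise Exception(...)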
def motu_download(options=None):
# we prepare options we want
(_options, args) = load_options(options=shlex.split(options.replace('\\', '/')) if options else None)
if _options.log_level != None:
logging.getLogger().setLevel(int(_options.log_level))
motu_api.execute_request(_options)
#===============================================================================
# The Main function
#===============================================================================
if __name__ == '__main__':
check_version()
start_time = datetime.datetime.now()
# first initialize the logger
logging.addLevelName(utils_log.TRACE_LEVEL, 'TRACE')
logging.config.fileConfig( os.path.join(os.path.dirname(__file__),LOG_CFG_FILE) )
log = logging.getLogger("motu-client-python")
logging.getLogger().setLevel(logging.INFO)
try:
motu_download()
except Exception, e:
log.error( "Execution failed: %s", e )
if hasattr(e, 'reason'):
log.info( ' . reason: %s', e.reason )
if hasattr(e, 'code'):
            log.info( ' . code: %s', e.code )
if hasattr(e, 'read'):
log.log( utils_log.TRACE_LEVEL, ' . detail:\n%s', e.read() )
log.debug( '-'*60 )
log.debug( "Stack trace exception is detailed herafter:" )
exc_type, exc_value, exc_tb = sys.exc_info()
x = traceback.format_exception(exc_type, exc_value, exc_tb)
for stack in x:
log.debug( ' . %s', stack.replace('\n', '') )
log.debug( '-'*60 )
log.log( utils_log.TRACE_LEVEL, 'System info is provided hereafter:' )
system, node, release, version, machine, processor = platform.uname()
log.log( utils_log.TRACE_LEVEL, ' . system : %s', system )
log.log( utils_log.TRACE_LEVEL, ' . node : %s', node )
log.log( utils_log.TRACE_LEVEL, ' . release : %s', release )
log.log( utils_log.TRACE_LEVEL, ' . version : %s', version )
log.log( utils_log.TRACE_LEVEL, ' . machine : %s', machine )
log.log( utils_log.TRACE_LEVEL, ' . processor: %s', processor )
log.log( utils_log.TRACE_LEVEL, ' . python : %s', sys.version )
log.log( utils_log.TRACE_LEVEL, ' . client : %s', get_client_version() )
log.log( utils_log.TRACE_LEVEL, '-'*60 )
sys.exit(ERROR_CODE_EXIT)
finally:
log.debug( "Elapsed time : %s", str(datetime.datetime.now() - start_time) )
|
dipapaspyros/bdo_platform
|
aggregator/connectors/motu/client.py
|
Python
|
mit
| 13,497
|
[
"NetCDF"
] |
5d04ad8af76886ff019650dd3142bcccad484e85ccc6e0a47d587b733688ca27
|
from progressbar import ProgressBar, Percentage, SimpleProgress, Timer, AdaptiveETA, Bar, FormatLabel
import numpy as np
import itertools
import random
from phylo_utils import seq_to_partials
from phylo_utils.markov import TransitionMatrix
from phylo_utils.models import LG, WAG, GTR
from phylo_utils.likelihood import GammaMixture
from Bio.Seq import Seq, UnknownSeq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from collections import defaultdict
__all__ = ['concatenate',
'flatten_list',
'symmetrise',
'regex_search_extract',
'setup_progressbar',
'model_translate',
'smooth_freqs',
'grouper',
'insort_no_dup',
'alignment_to_partials',
'biopython_to_partials',
'create_gamma_model',
'weighted_choice',
'sample_wr']
def concatenate(alignments):
"""
Concatenates a list of Bio.Align.MultipleSeqAlignment objects.
    If any sequences are missing, they are padded with unknown data
(Bio.Seq.UnknownSeq).
Returns a single Bio.Align.MultipleSeqAlignment.
Limitations: any annotations in the sub-alignments are lost in
the concatenated alignment.
"""
# Get the full set of labels (i.e. sequence ids) for all the alignments
all_labels = set(seq.id for aln in alignments for seq in aln)
# Make a dictionary to store info as we go along
# (defaultdict is convenient -- asking for a missing key gives back an empty list)
tmp = defaultdict(list)
for aln in alignments:
length = aln.get_alignment_length()
# check if any labels are missing in the current alignment
these_labels = set(rec.id for rec in aln)
missing = all_labels - these_labels
# if any are missing, create unknown data of the right length,
# stuff the string representation into the tmp dict
for label in missing:
new_seq = UnknownSeq(length)
tmp[label].append(str(new_seq))
# else stuff the string representation into the tmp dict
for rec in aln:
tmp[rec.id].append(str(rec.seq))
# Stitch all the substrings together using join (most efficient way),
# and build the Biopython data structures Seq, SeqRecord and MultipleSeqAlignment
msa = MultipleSeqAlignment(SeqRecord(Seq(''.join(v)), id=k, name=k, description=k)
for (k,v) in tmp.items())
return msa
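# Usage sketch: for alignments aln1 (ids A, B) and aln2 (ids B, C),
# concatenate([aln1, aln2]) covers A, B and C, its length is the sum of the
# two input lengths, and the missing stretches are UnknownSeq padding.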
def flatten_list(list_):
newlist = list()
x = newlist.extend
ap = newlist.append
for sublist in list_:
try:
x(sublist)
except TypeError: # if the "sublist" is non-iterable, append as a plain element
ap(sublist)
return newlist
def symmetrise(matrix, tri='upper'):
"""
Will copy the selected (upper or lower) triangle of a square matrix
to the opposite side, so that the matrix is symmetrical.
Alters in place.
"""
if tri == 'upper':
tri_fn = np.triu_indices
else:
tri_fn = np.tril_indices
size = matrix.shape[0]
matrix[tri_fn(size)[::-1]] = matrix[tri_fn(size)]
return matrix
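# Example: symmetrise(np.array([[1., 2.], [0., 3.]])) copies the upper
# triangle down, giving [[1., 2.], [2., 3.]].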
def regex_search_extract(search_attempt):
return search_attempt.group() if search_attempt else None
def setup_progressbar(msg, size, format_label=None, simple_progress=False):
if not msg.endswith(': '):
msg += ': '
if simple_progress:
widgets = [msg,
SimpleProgress(), ' ',
Bar(), ' ',
Timer(), ' ',
AdaptiveETA()]
else:
widgets = [msg,
Percentage(), ' ',
Bar(), ' ',
Timer(), ' ',
AdaptiveETA()]
if format_label is not None:
widgets.append(FormatLabel(format_label))
pbar = ProgressBar(widgets=widgets, maxval=size)
return pbar
def model_translate(model):
translation = {'LG' : 'LG08',
'WAG': 'WAG01'}
return translation.get(model, model)
def smooth_freqs(freqs):
"""
Smooths freqs vector, guarantees sum == 1
:param freqs: vector of frequencies
:return: vector of frequencies guaranteed to sum to 1
"""
s = sum(freqs)
return [f/s for f in freqs]
def grouper(n, iterable):
"""
>>> list(grouper(3, 'ABCDEFG'))
[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
"""
iterable = iter(iterable)
return iter(lambda: list(itertools.islice(iterable, n)), [])
def insort_no_dup(lst, item):
"""
If item is not in lst, add item to list at its sorted position
"""
import bisect
ix = bisect.bisect_left(lst, item)
    if ix == len(lst) or lst[ix] != item:
lst[ix:ix] = [item]
def alignment_to_partials(alignment, missing_data=None):
""" Generate a partials dictionary from a treeCl.Alignment """
partials_dict = {}
for (name, sequence) in alignment.get_sequences():
datatype = 'dna' if alignment.is_dna() else 'protein'
partials_dict[name] = seq_to_partials(sequence, datatype)
if missing_data is not None:
l = len(alignment)
for name in missing_data:
if name not in partials_dict:
partials_dict[name] = seq_to_partials('-'*l, datatype)
return partials_dict
def biopython_to_partials(alignment, datatype):
""" Generate a partials dictionary from a treeCl.Alignment """
partials_dict = {}
for seq in alignment:
partials_dict[seq.name] = seq_to_partials(seq, datatype)
return partials_dict
def create_gamma_model(alignment, missing_data=None, ncat=4):
""" Create a phylo_utils.likelihood.GammaMixture for calculating
likelihood on a tree, from a treeCl.Alignment and its matching
treeCl.Parameters """
model = alignment.parameters.partitions.model
freqs = alignment.parameters.partitions.frequencies
alpha = alignment.parameters.partitions.alpha
if model == 'LG':
subs_model = LG(freqs)
elif model == 'WAG':
subs_model = WAG(freqs)
elif model == 'GTR':
rates = alignment.parameters.partitions.rates
subs_model = GTR(rates, freqs, True)
else:
raise ValueError("Can't handle this model: {}".format(model))
tm = TransitionMatrix(subs_model)
gamma = GammaMixture(alpha, ncat)
gamma.init_models(tm, alignment_to_partials(alignment, missing_data))
return gamma
def weighted_choice(choices):
total = sum(w for c, w in choices)
r = random.uniform(0, total)
upto = 0
for c, w in choices:
if upto + w > r:
return c
upto += w
assert False, "Shouldn't get here"
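# Example: weighted_choice([('a', 1), ('b', 3)]) returns 'b' about 75% of the time.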
def sample_wr(lst):
"""
Sample from lst, with replacement
"""
arr = np.array(lst)
indices = np.random.randint(len(lst), size=len(lst))
sample = np.empty(arr.shape, dtype=arr.dtype)
for i, ix in enumerate(indices):
sample[i] = arr[ix]
return list(sample)
def binom_coeff(n):
"""
Calculate the binomial coefficient (n, 2), i.e. the number of distinct pairs possible
in a set of size n
:param n: size of set
:return: number of pairs
"""
return int(n * (n-1) / 2)
|
kgori/treeCl
|
treeCl/utils/misc.py
|
Python
|
mit
| 7,203
|
[
"Biopython"
] |
7cd0d13a8768d9b448b437772f1d4918ba8b653ba676b710ebc19b682833276c
|
# sql/visitors.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Visitor/traversal interface and library functions.
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
they apply functionality. The most common use of this pattern
is statement compilation, where individual expression classes match
up to rendering methods that produce a string result. Beyond this,
the visitor system is also used to inspect expressions for various
information and patterns, as well as for usage in
some kinds of expression transformation. Other kinds of transformation
use a non-visitor traversal system.
For many examples of how the visit system is used, see the
sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules.
For an introduction to clause adaptation, see
http://techspot.zzzeek.org/2008/01/23/expression-transformations/
"""
from collections import deque
from .. import util
import operator
from .. import exc
__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor',
'CloningVisitor', 'ReplacingCloningVisitor', 'iterate',
'iterate_depthfirst', 'traverse_using', 'traverse',
'traverse_depthfirst',
'cloned_traverse', 'replacement_traverse']
class VisitableType(type):
"""Metaclass which assigns a `_compiler_dispatch` method to classes
having a `__visit_name__` attribute.
The _compiler_dispatch attribute becomes an instance method which
looks approximately like the following::
def _compiler_dispatch (self, visitor, **kw):
'''Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.'''
visit_attr = 'visit_%s' % self.__visit_name__
return getattr(visitor, visit_attr)(self, **kw)
Classes having no __visit_name__ attribute will remain unaffected.
"""
def __init__(cls, clsname, bases, clsdict):
if clsname != 'Visitable' and \
hasattr(cls, '__visit_name__'):
_generate_dispatch(cls)
super(VisitableType, cls).__init__(clsname, bases, clsdict)
def _generate_dispatch(cls):
"""Return an optimized visit dispatch function for the cls
for use by the compiler.
"""
if '__visit_name__' in cls.__dict__:
visit_name = cls.__visit_name__
if isinstance(visit_name, str):
            # There is an optimization opportunity here because the
            # string name of the class's __visit_name__ is known at
            # this early stage (import time) so it can be pre-constructed.
getter = operator.attrgetter("visit_%s" % visit_name)
def _compiler_dispatch(self, visitor, **kw):
try:
meth = getter(visitor)
except AttributeError:
raise exc.UnsupportedCompilationError(visitor, cls)
else:
return meth(self, **kw)
else:
# The optimization opportunity is lost for this case because the
# __visit_name__ is not yet a string. As a result, the visit
# string has to be recalculated with each compilation.
def _compiler_dispatch(self, visitor, **kw):
visit_attr = 'visit_%s' % self.__visit_name__
try:
meth = getattr(visitor, visit_attr)
except AttributeError:
raise exc.UnsupportedCompilationError(visitor, cls)
else:
return meth(self, **kw)
_compiler_dispatch.__doc__ = \
"""Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.
"""
cls._compiler_dispatch = _compiler_dispatch
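# Illustration (a sketch, not part of this module): with __visit_name__ set on
# a Visitable subclass, compilation dispatches to the matching visit_* method:
#
#   class MyNode(Visitable):
#       __visit_name__ = 'my_node'
#
#   class MyVisitor(object):
#       def visit_my_node(self, node, **kw):
#           return 'visited'
#
#   MyNode()._compiler_dispatch(MyVisitor())  # -> 'visited'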
class Visitable(util.with_metaclass(VisitableType, object)):
"""Base class for visitable objects, applies the
``VisitableType`` metaclass.
"""
class ClauseVisitor(object):
"""Base class for visitor objects which can traverse using
the traverse() function.
"""
__traverse_options__ = {}
def traverse_single(self, obj, **kw):
for v in self._visitor_iterator:
meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
if meth:
return meth(obj, **kw)
def iterate(self, obj):
"""traverse the given expression structure, returning an iterator
of all elements.
"""
return iterate(obj, self.__traverse_options__)
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return traverse(obj, self.__traverse_options__, self._visitor_dict)
@util.memoized_property
def _visitor_dict(self):
visitors = {}
for name in dir(self):
if name.startswith('visit_'):
visitors[name[6:]] = getattr(self, name)
return visitors
@property
def _visitor_iterator(self):
"""iterate through this visitor and each 'chained' visitor."""
v = self
while v:
yield v
v = getattr(v, '_next', None)
def chain(self, visitor):
"""'chain' an additional ClauseVisitor onto this ClauseVisitor.
the chained visitor will receive all visit events after this one.
"""
tail = list(self._visitor_iterator)[-1]
tail._next = visitor
return self
class CloningVisitor(ClauseVisitor):
"""Base class for visitor objects which can traverse using
the cloned_traverse() function.
"""
def copy_and_process(self, list_):
"""Apply cloned traversal to the given list of elements, and return
the new list.
"""
return [self.traverse(x) for x in list_]
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return cloned_traverse(
obj, self.__traverse_options__, self._visitor_dict)
class ReplacingCloningVisitor(CloningVisitor):
"""Base class for visitor objects which can traverse using
the replacement_traverse() function.
"""
def replace(self, elem):
"""receive pre-copied elements during a cloning traversal.
If the method returns a new element, the element is used
instead of creating a simple copy of the element. Traversal
will halt on the newly returned element if it is re-encountered.
"""
return None
def traverse(self, obj):
"""traverse and visit the given expression structure."""
def replace(elem):
for v in self._visitor_iterator:
e = v.replace(elem)
if e is not None:
return e
return replacement_traverse(obj, self.__traverse_options__, replace)
def iterate(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be breadth-first.
"""
# fasttrack for atomic elements like columns
children = obj.get_children(**opts)
if not children:
return [obj]
traversal = deque()
stack = deque([obj])
while stack:
t = stack.popleft()
traversal.append(t)
for c in t.get_children(**opts):
stack.append(c)
return iter(traversal)
def iterate_depthfirst(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be depth-first.
"""
# fasttrack for atomic elements like columns
children = obj.get_children(**opts)
if not children:
return [obj]
stack = deque([obj])
traversal = deque()
while stack:
t = stack.pop()
traversal.appendleft(t)
for c in t.get_children(**opts):
stack.append(c)
return iter(traversal)
def traverse_using(iterator, obj, visitors):
"""visit the given expression structure using the given iterator of
objects.
"""
for target in iterator:
meth = visitors.get(target.__visit_name__, None)
if meth:
meth(target)
return obj
def traverse(obj, opts, visitors):
"""traverse and visit the given expression structure using the default
iterator.
"""
return traverse_using(iterate(obj, opts), obj, visitors)
def traverse_depthfirst(obj, opts, visitors):
"""traverse and visit the given expression structure using the
depth-first iterator.
"""
return traverse_using(iterate_depthfirst(obj, opts), obj, visitors)
def cloned_traverse(obj, opts, visitors):
"""clone the given expression structure, allowing
modifications by visitors."""
cloned = {}
stop_on = set(opts.get('stop_on', []))
def clone(elem):
if elem in stop_on:
return elem
else:
if id(elem) not in cloned:
cloned[id(elem)] = newelem = elem._clone()
newelem._copy_internals(clone=clone)
meth = visitors.get(newelem.__visit_name__, None)
if meth:
meth(newelem)
return cloned[id(elem)]
if obj is not None:
obj = clone(obj)
return obj
def replacement_traverse(obj, opts, replace):
"""clone the given expression structure, allowing element
replacement by a given replacement function."""
cloned = {}
stop_on = set([id(x) for x in opts.get('stop_on', [])])
def clone(elem, **kw):
if id(elem) in stop_on or \
'no_replacement_traverse' in elem._annotations:
return elem
else:
newelem = replace(elem)
if newelem is not None:
stop_on.add(id(newelem))
return newelem
else:
if elem not in cloned:
cloned[elem] = newelem = elem._clone()
newelem._copy_internals(clone=clone, **kw)
return cloned[elem]
if obj is not None:
obj = clone(obj, **opts)
return obj
|
pcu4dros/pandora-core
|
workspace/lib/python3.5/site-packages/sqlalchemy/sql/visitors.py
|
Python
|
mit
| 10,271
|
[
"VisIt"
] |
e22a46bc092a15a4805a0c8db8a8f1e71fe6b3c188cbd6aafb00b9a63e3e6778
|
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1
from __future__ import annotations
import json
import math
import os
import sys
import matplotlib
import iotbx.phil
from cctbx import crystal, miller
from cctbx.array_family import flex
from scitbx import matrix
import dials.util
help_message = """
Calculates a stereographic projection image for the given crystal models and
the given miller indices (either specified individually, or for all miller indices
up to a given hkl_limit). By default the projection is in the plane
perpendicular to 0,0,1 reflection for the first crystal, however the projection
can optionally be performed in the laboratory frame (frame=laboratory) in the
plane perpendicular to the beam. Setting the parameter expand_to_p1=True will
also plot all symmetry equivalents of the given miller indices, and
eliminate_sys_absent=False will eliminate systematically absent reflections
before generating the projection.
Examples::
dials.stereographic_projection indexed.expt hkl=1,0,0 hkl=0,1,0
dials.stereographic_projection indexed.expt hkl_limit=2
dials.stereographic_projection indexed_1.expt indexed_2.expt hkl=1,0,0 expand_to_p1=True
"""
phil_scope = iotbx.phil.parse(
"""
hkl = None
.type = ints(size=3)
.multiple=True
hkl_limit = None
.type = int(value_min=1)
expand_to_p1 = True
.type = bool
.help = "Expand the given miller indices to symmetry equivalent reflections"
eliminate_sys_absent = False
.type = bool
.help = "Eliminate systematically absent reflections"
frame = *laboratory crystal
.type = choice
phi_angle = 0
.type = float
.help = "Phi rotation angle (degrees)"
use_starting_angle = False
.type = bool
.help = "If True, then the projection will be done for each crystal at the "
"starting phi angle for the scan associated with the crystal."
plane_normal = None
.type = ints(size=3)
save_coordinates = True
.type = bool
plot {
filename = stereographic_projection.png
.type = path
label_indices = False
.type = bool
colours = None
.type = strings
marker_size = 3
.type = int(value_min=1)
font_size = 6
.type = float(value_min=0)
colour_map = None
.type = str
gridsize = None
.type = int
labels = None
.type = strings
}
json {
filename = None
.type = path
}
"""
)
def reference_poles_perpendicular_to_beam(beam, goniometer):
# plane normal
d0 = matrix.col(beam.get_s0()).normalize()
if goniometer is not None:
d1 = d0.cross(matrix.col(goniometer.get_rotation_axis())).normalize()
else:
d1 = d0.ortho()
d2 = d1.cross(d0).normalize()
return (d0, d1, d2)
def reference_poles_crystal(crystal_model, plane_normal=(0, 0, 1)):
A = matrix.sqr(crystal_model.get_A())
B = matrix.sqr(crystal_model.get_B())
A_inv = A.inverse()
G = A_inv * A_inv.transpose()
G_star = A.transpose() * A
h0 = (G * matrix.col(plane_normal)).normalize()
h1 = matrix.col((1, 0, 0)).cross((G_star * h0).normalize())
h2 = (G_star * h1).cross(G_star * h0).normalize()
return tuple((B * h).normalize() for h in (h0, h1, h2))
def stereographic_projection(points, reference_poles):
# https://doi.org/10.1107/S0021889868005029
# J. Appl. Cryst. (1968). 1, 68-70
# The construction of stereographic projections by computer
# G. K. Stokes, S. R. Keown and D. J. Dyson
assert len(reference_poles) == 3
r_0, r_1, r_2 = reference_poles
projections = flex.vec2_double()
for p in points:
r_i = matrix.col(p)
# theta is the angle between r_i and the plane normal, r_0
cos_theta = r_i.cos_angle(r_0)
if cos_theta < 0:
r_i = -r_i
cos_theta = r_i.cos_angle(r_0)
# alpha is the angle between r_i and r_1
cos_alpha = r_i.cos_angle(r_1)
theta = math.acos(cos_theta)
cos_phi = cos_alpha / math.sin(theta)
if abs(cos_phi) > 1:
cos_phi = math.copysign(1, cos_phi)
phi = math.acos(cos_phi)
N = r_i.dot(r_2)
r = math.tan(theta / 2)
x = r * cos_phi
y = r * math.sin(phi)
y = math.copysign(y, N)
projections.append((x, y))
return projections
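# Sanity check (illustrative): the plane normal r_0 itself projects to the
# origin (theta = 0, so r = tan(0) = 0), while any pole perpendicular to r_0
# lands on the unit circle (theta = 90 deg, r = tan(45 deg) = 1).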
def gcd_list(l):
# greatest common divisor for a list of numbers
from scitbx.math import gcd_int_simple as gcd
result = l[0]
for i in range(1, len(l)):
result = gcd(result, l[i])
return result
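# e.g. gcd_list([2, 4, 6]) == 2, so below the index (2, 4, 6) reduces to (1, 2, 3).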
@dials.util.show_mail_handle_errors()
def run(args=None):
from dials.util.options import ArgumentParser, flatten_experiments
# The script usage
usage = "dials.stereographic_projection [options] [param.phil] indexed.expt"
parser = ArgumentParser(
usage=usage,
phil=phil_scope,
read_experiments=True,
check_format=False,
epilog=help_message,
)
params, options = parser.parse_args(args=args, show_diff_phil=True)
experiments = flatten_experiments(params.input.experiments)
if not experiments:
parser.print_help()
return
if not params.hkl and params.hkl_limit is None:
sys.exit("Please provide hkl or hkl_limit parameters.")
if params.plot.labels and len(params.plot.labels) != len(experiments):
sys.exit(
"Number of labels (%i) must equal number of experiments (%i)"
% (len(params.plot.labels), len(experiments))
)
if params.hkl is not None and len(params.hkl):
miller_indices = flex.miller_index(params.hkl)
elif params.hkl_limit is not None:
limit = params.hkl_limit
miller_indices = flex.miller_index()
for h in range(-limit, limit + 1):
for k in range(-limit, limit + 1):
for l in range(-limit, limit + 1):
if (h, k, l) == (0, 0, 0):
continue
miller_indices.append((h, k, l))
crystals = experiments.crystals()
symmetry = crystal.symmetry(
unit_cell=crystals[0].get_unit_cell(), space_group=crystals[0].get_space_group()
)
miller_set = miller.set(symmetry, miller_indices)
d_spacings = miller_set.d_spacings()
if params.eliminate_sys_absent:
d_spacings = d_spacings.eliminate_sys_absent()
if params.expand_to_p1:
d_spacings = d_spacings.as_non_anomalous_array().expand_to_p1()
miller_indices = d_spacings.indices()
# find the greatest common factor (divisor) between miller indices
miller_indices_unique = flex.miller_index()
for hkl in miller_indices:
gcd = gcd_list(hkl)
if gcd > 1:
miller_indices_unique.append(tuple(int(h / gcd) for h in hkl))
elif gcd < 1:
pass
else:
miller_indices_unique.append(hkl)
miller_indices = miller_indices_unique
miller_indices = flex.miller_index(list(set(miller_indices)))
ref_crystal = crystals[0]
U = matrix.sqr(ref_crystal.get_U())
B = matrix.sqr(ref_crystal.get_B())
R = matrix.identity(3)
if params.frame == "laboratory":
reference_poles = reference_poles_perpendicular_to_beam(
experiments[0].beam, experiments[0].goniometer
)
if params.use_starting_angle:
rotation_axis = matrix.col(experiments[0].goniometer.get_rotation_axis())
R = rotation_axis.axis_and_angle_as_r3_rotation_matrix(
experiments[0].scan.get_oscillation()[0], deg=True
)
elif params.phi_angle != 0:
rotation_axis = matrix.col(experiments[0].goniometer.get_rotation_axis())
R = rotation_axis.axis_and_angle_as_r3_rotation_matrix(
params.phi_angle, deg=True
)
else:
if params.plane_normal is not None:
plane_normal = params.plane_normal
else:
plane_normal = (0, 0, 1)
reference_poles = reference_poles_crystal(
ref_crystal, plane_normal=plane_normal
)
if params.frame == "crystal":
U = matrix.identity(3)
reciprocal_space_points = list(R * U * B) * miller_indices.as_vec3_double()
projections_ref = stereographic_projection(reciprocal_space_points, reference_poles)
projections_all = [projections_ref]
if experiments:
from dials.algorithms.indexing.compare_orientation_matrices import (
difference_rotation_matrix_axis_angle,
)
for expt in experiments[1:]:
cryst = expt.crystal
if params.frame == "crystal":
R_ij, axis, angle, cb_op = difference_rotation_matrix_axis_angle(
ref_crystal, cryst
)
U = R_ij
            elif params.use_starting_angle:
                rotation_axis = matrix.col(expt.goniometer.get_rotation_axis())
                R = rotation_axis.axis_and_angle_as_r3_rotation_matrix(
                    expt.scan.get_oscillation()[0], deg=True
                )
else:
U = matrix.sqr(cryst.get_U())
reciprocal_space_points = (
list(R * U * matrix.sqr(cryst.get_B()))
* miller_indices.as_vec3_double()
)
projections = stereographic_projection(
reciprocal_space_points, reference_poles
)
projections_all.append(projections)
if params.save_coordinates:
with open("projections.txt", "w") as f:
f.write("crystal h k l x y" + os.linesep)
for i_cryst, projections in enumerate(projections_all):
for hkl, proj in zip(miller_indices, projections):
f.write("%i " % (i_cryst + 1))
f.write("%i %i %i " % hkl)
f.write(("%f %f" + os.linesep) % proj)
if params.plot.filename:
epochs = None
if params.plot.colour_map is not None:
if experiments[0].scan is not None:
epochs = [expt.scan.get_epochs()[0] for expt in experiments]
else:
epochs = [i for i, expt in enumerate(experiments)]
plot_projections(
projections_all,
filename=params.plot.filename,
colours=params.plot.colours,
marker_size=params.plot.marker_size,
font_size=params.plot.font_size,
gridsize=params.plot.gridsize,
label_indices=miller_indices if params.plot.label_indices else False,
epochs=epochs,
colour_map=params.plot.colour_map,
)
if params.json.filename:
projections_as_json(
projections_all, filename=params.json.filename, labels=params.plot.labels
)
def plot_projections(
projections,
filename=None,
colours=None,
marker_size=3,
font_size=6,
gridsize=None,
label_indices=False,
epochs=None,
colour_map=None,
):
projections_all = projections
# http://matplotlib.org/faq/howto_faq.html#generate-images-without-having-a-window-appear
matplotlib.use("Agg") # use a non-interactive backend
from matplotlib import pylab, pyplot
if epochs is not None and colour_map is not None:
epochs = flex.double(epochs)
epochs -= flex.min(epochs)
epochs /= flex.max(epochs)
cmap = matplotlib.cm.get_cmap(colour_map)
colours = [cmap(e) for e in epochs]
elif colours is None or len(colours) == 0:
colours = ["b"] * len(projections_all)
elif len(colours) < len(projections_all):
colours = colours * len(projections_all)
fig = pyplot.figure()
pyplot.scatter([0], [0], marker="+", c="0.75", s=100)
cir = pylab.Circle((0, 0), radius=1.0, fill=False, color="0.75")
pylab.gca().add_patch(cir)
if gridsize is not None:
x = flex.double()
y = flex.double()
for i, projections in enumerate(projections_all):
x_, y_ = projections.parts()
x.extend(x_)
y.extend(y_)
hb = pyplot.hexbin(x, y, gridsize=gridsize, linewidths=0.2)
pyplot.colorbar(hb)
else:
for i, projections in enumerate(projections_all):
x, y = projections.parts()
pyplot.scatter(
x.as_numpy_array(),
y.as_numpy_array(),
c=colours[i],
s=marker_size,
edgecolors="none",
)
if label_indices:
for j, (hkl, proj) in enumerate(zip(label_indices, projections)):
# hack to not write two labels on top of each other
p1, p2 = (projections - proj).parts()
if (flex.sqrt(flex.pow2(p1) + flex.pow2(p2)) < 1e-3).iselection()[
0
] != j:
continue
pyplot.text(proj[0], proj[1], str(hkl), fontsize=font_size)
fig.axes[0].set_aspect("equal")
pyplot.xlim(-1.1, 1.1)
pyplot.ylim(-1.1, 1.1)
if filename is not None:
pyplot.savefig(filename, dpi=300)
def projections_as_dict(projections, labels):
projections_all = flex.vec2_double()
if labels:
labels_all = []
assert len(projections) == len(labels)
for i, proj in enumerate(projections):
projections_all.extend(proj)
if labels:
labels_all.extend([labels[i]] * len(proj))
data = []
x, y = projections_all.parts()
data.append(
{
"x": list(x),
"y": list(y),
"mode": "markers",
"type": "scatter",
"name": "stereographic_projections",
"showlegend": False,
"hovertext": labels_all if labels else "",
"hoverinfo": "text",
}
)
data.append(
{
"x": [0],
"y": [0],
"mode": "markers",
"marker": {
"color": "black",
"size": 25,
"symbol": "cross-thin",
"line": {"width": 1},
},
"showlegend": False,
}
)
d = {
"data": data,
"layout": {
"title": "Stereographic projections",
"hovermode": "closest",
"xaxis": {
"range": [-1.0, 1.0],
"showgrid": False,
"zeroline": False,
"showline": False,
"ticks": "",
"showticklabels": False,
},
"yaxis": {
"range": [-1.0, 1.0],
"showgrid": False,
"zeroline": False,
"showline": False,
"ticks": "",
"showticklabels": False,
},
"shapes": [
{
"type": "circle",
"xref": "x",
"yref": "y",
"x0": -1,
"y0": -1,
"x1": 1,
"y1": 1,
"line": {"color": "black"},
}
],
},
}
return d
def projections_as_json(projections, filename=None, labels=None):
d = projections_as_dict(projections, labels=labels)
json_str = json.dumps(d)
if filename is not None:
with open(filename, "w") as f:
f.write(json_str)
return json_str
if __name__ == "__main__":
run()
|
dials/dials
|
command_line/stereographic_projection.py
|
Python
|
bsd-3-clause
| 15,500
|
[
"CRYSTAL"
] |
52ffdd5231f9f41c5f19f00e32c318b481f163b56ad39522688c4f558a4a2a7b
|
# Python Imports
import uuid
import os
import json
import time
import re
import numpy as np
now = time.time # shortcut
# Twisted Imports
from twisted.internet import defer, threads, task
from twisted.python import log
from twisted.python.filepath import FilePath
from twisted.logger import Logger
# Octopus Imports
from octopus.sequence.error import AlreadyRunning, NotRunning
from octopus.events import EventEmitter
# Package Imports
from .database.dbutil import makeFinder
class Experiment (EventEmitter):
""" This object is the representation of a running experiment,
stored in the database and in the event store. """
db = None
dataDir = None
log = Logger()
@classmethod
def exists (cls, id):
d = cls.db.runQuery("SELECT guid FROM experiments WHERE guid = ?", (id,))
d.addCallback(lambda r: len(r) > 0)
return d
@classmethod
def delete (cls, id):
return cls.db.runOperation("UPDATE experiments SET deleted = 1 WHERE guid = ?", (id, ))
@classmethod
def restore (cls, id):
return cls.db.runOperation("UPDATE experiments SET deleted = 0 WHERE guid = ?", (id, ))
def __init__ (self, sketch):
id = str(uuid.uuid4())
self.id = id
self.short_id = id.split('-')[0]
self.sketch = sketch
self.logMessages = []
self.log.debug(
"Creating experiment {log_source.short_id!s} for Sketch {log_source.sketch.id!s}"
)
@defer.inlineCallbacks
def run (self):
""" Run the experiment.
Main method to run an experiment. Returns a Deferred which
calls back when the experiment is complete, or errs back
if there is an error in the experiment.
Whilst the experiment is running, the pause(), resume() and
stop() methods can be used to interact with it. (Note: stop()
will cause the Deferred returned by run() to errback).
This method:
1. Inserts an entry for the experiment into the database.
2. Creates a directory for the experiment data.
3. Takes a snapshot of the sketch and stores it in the directory.
4. Sets event listeners to record any sketch changes during the
experiment.
5. Records all changes to variables in the workspace
during the experiment.
"""
id = self.id
sketch = self.sketch
sketch_id = sketch.id
workspace = sketch.workspace
self.log.debug(
"Running experiment {log_source.id!s}",
)
# If the workspace is already running, we can't run another
# experiment on top of it. No experiment entry in the database
# will be created.
try:
yield workspace.reset()
except AlreadyRunning:
self.log.debug("Experiment {log_source.short_id!s} was already running. Abort & reset...")
yield workspace.abort()
yield workspace.reset()
self.startTime = now()
# Insert the new experiment into the DB.
yield self.db.runOperation("""
INSERT INTO experiments
(guid, sketch_guid, title, user_id, started_date)
VALUES (?, ?, ?, ?, ?)
""",
(id, sketch_id, sketch.title, 1, self.startTime)
)
self.log.debug("Experiment {log_source.short_id!s} inserted into database.")
# Create a directory to store the experiment logs and data.
stime = time.gmtime(self.startTime)
self._experimentDir = FilePath(self.dataDir)
for segment in [stime.tm_year, stime.tm_mon, stime.tm_mday, id]:
self._experimentDir = self._experimentDir.child(str(segment))
if not self._experimentDir.exists():
self._experimentDir.createDirectory()
self.log.debug(
"Experiment {log_source.short_id!s} directory {dir!s} created.",
dir = self._experimentDir
)
# Create files for the sketch logs, snapshot, variables etc.
eventFile = self._experimentDir.child("events.log").create()
sketchFile = self._experimentDir.child("sketch.log").create()
snapFile = self._experimentDir.child("sketch.snapshot.log")
varsFile = self._experimentDir.child("variables")
openFiles = { "_events": eventFile, "_sketch": sketchFile }
usedFiles = {}
self.log.debug(
"Experiment {log_source.short_id!s} log files created."
)
# Write a snapshot of the sketch.
with snapFile.create() as fp:
fp.write("\n".join(map(json.dumps, workspace.toEvents())).encode('utf-8'))
self.log.debug(
"Experiment {log_source.short_id!s} snapshot created in {file!s}.",
file = snapFile
)
# Log events emitted by the sketch (block changes, etc.)
# The idea is that with the snapshot and change log, the
# layout of the sketch could be replayed over the period
# of the experiment.
def onSketchEvent (protocol, topic, data):
self.log.debug(
"Experiment {log_source.short_id!s}: sketch event: {protocol}, {topic}, {data}",
protocol = protocol,
topic = topic,
data = data
)
# Don't log block state events to the sketch log
# (there will be lots, and they are not relevant to the
# structure of the sketch)
if protocol == "block" and topic == "state":
return
# If the sketch is renamed whilst the experiment is
# running, update the experiment title.
elif protocol == "sketch" and topic == "renamed":
self.db.runOperation("""
UPDATE experiments SET title = ? WHERE guid = ?
""", (data['title'], id)).addErrback(log.err)
writeEvent(sketchFile, protocol, topic, data)
# Helper function to format an event and write it to the file.
def writeEvent (file, protocol, topic, data):
time = now()
event = {
"time": time,
"relative": time - self.startTime,
"protocol": protocol,
"topic": topic,
"data": data
}
file.write((json.dumps(event) + "\n").encode('utf-8'))
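        # Each written line is one JSON object, e.g. (illustrative values):
        #   {"time": 1500000000.0, "relative": 12.5, "protocol": "block",
        #    "topic": "state", "data": {...}}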
sketch.subscribe(self, onSketchEvent)
# Subscribe to workspace events. Block states are written to
# the events log.
@workspace.on("block-state")
def onBlockStateChange (data):
self.log.debug(
"Experiment {log_source.short_id!s}: block {block_id} state -> {state}",
block_id = data['block'],
state = data['state']
)
writeEvent(eventFile, "block", "state", data)
data['sketch'] = sketch_id
data['experiment'] = id
sketch.notifySubscribers("block", "state", data, self)
# Log messages are written to the events log, and also
# broadcast to subscribers. The experiment keeps a record
# of events so that new clients can get a historical log.
@workspace.on("log-message")
def onLogMessage (data):
self.log.debug(
"Experiment {log_source.short_id!s}: log message: {message}",
message = data['message']
)
writeEvent(eventFile, "experiment", "log", data)
data['sketch'] = sketch_id
data['experiment'] = id
data['time'] = round(now(), 2)
self.logMessages.append(data)
sketch.notifySubscribers("experiment", "log", data, self)
# Log changes to variable data.
#
# Note: files are only created when a variable actually gets data,
# not necessarily when the variable is created.
#
# The relative time is written, to save filesize. The absolute
# time can be calculated using the start time at the top of the file.
@workspace.variables.on("variable-changed")
def onVarChanged (data):
self.log.debug(
"Experiment {log_source.short_id!s}: variable {variable_name} value -> {value}",
variable_name = data['name'],
value = data['value']
)
try:
logFile = openFiles[data['name']]
except KeyError:
varName = unusedVarName(data['name'])
fileName = fileNameFor(varName)
logFile = self._experimentDir.child(fileName).create()
openFiles[varName] = logFile
addUsedFile(varName, fileName, workspace.variables.get(data['name']))
logFile.write(
f"# name:{data['name']}\n# type:{type(data['value']).__name__} \n# start:{self.startTime:.2f}\n".encode('utf-8')
)
logFile.write(f"{data['time'] - self.startTime:.2f}, {data['value']}\n".encode('utf-8'))
# Update the open files list if a variable is renamed.
#
# TODO: Variable renaming during experiment run is a bit dodgy.
# Which variable to display in the results? It might be better to
# disallow renaming during runtime.
@workspace.variables.on("variable-renamed")
def onVarRenamed (data):
self.log.debug(
"Experiment {log_source.short_id!s}: variable {old_name} renamed -> {new_name}",
old_name = data['oldName'],
new_name = data['newName']
)
openFiles[data['newName']] = openFiles[data['oldName']]
del openFiles[data['oldName']]
addUsedFile(data['newName'], "", data['variable'])
# Ensure that renaming vars doesn't lead to any overwriting.
# (see TODO above).
def unusedVarName (varName):
if varName in usedFiles:
return unusedVarName(varName + "_")
return varName
# Format a variable name into a file name
def fileNameFor (varName):
return re.sub(r'[^a-z0-9\.]', '_', varName) + '.csv'
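        # e.g. fileNameFor("pump1::flow.rate") == "pump1__flow.rate.csv";
        # anything outside [a-z0-9.] becomes an underscore.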
# Build a list of files and variables to be written to the variables
# list file, which is used to generate the var list
# when the experiment results are being displayed.
def addUsedFile (varName, fileName, variable):
try:
unit = str(variable.unit)
except AttributeError:
unit = ""
if fileName != "":
usedFiles[varName] = {
"name": varName,
"type": variable.type.__name__,
"unit": unit,
"file": fileName
}
else:
usedFiles[varName] = {}
        # Write all file data. This is called periodically during the
        # experiment so that variables that change infrequently are still
        # written to disk and will not be lost if the program crashes.
def flushFiles ():
try:
for file in openFiles.values():
file.flush()
os.fsync(file.fileno())
except:
log.err()
flushFilesLoop = task.LoopingCall(flushFiles)
flushFilesLoop.start(5 * 60, False).addErrback(log.err)
# Attempt to run the experiment. Make sure that eveything is
# cleaned up after the experiment, even in the event of an error.
try:
self.log.debug("Experiment {log_source.short_id!s}: Running workspace")
yield workspace.run()
self.log.debug("Experiment {log_source.short_id!s}: Workspace completed")
finally:
# Remove event handlers
sketch.unsubscribe(self)
workspace.off("block-state", onBlockStateChange)
workspace.off("log-message", onLogMessage)
workspace.variables.off("variable-changed", onVarChanged)
workspace.variables.off("variable-renamed", onVarRenamed)
# Close file pointers
with varsFile.create() as fp:
fp.write(json.dumps(usedFiles).encode('utf-8'))
try:
flushFilesLoop.stop()
except:
log.err()
for file in openFiles.values():
file.close()
# Store completed time for experiment.
self.db.runOperation("""
UPDATE experiments SET finished_date = ? WHERE guid = ?
""", (now(), id)).addErrback(log.err)
self.log.debug("Experiment {log_source.short_id!s}: Set completed in database")
def pause (self):
"""Pause the experiment if it is running.
Throws an error if called when the experiment is not running.
"""
return self.sketch.workspace.pause()
def resume (self):
"""Resume the experiment if it is paused.
Throws an error if called when the experiment is not paused.
"""
return self.sketch.workspace.resume()
def stop (self):
"""Abort the experiment.
Causes the Deferred returned from run() to errback.
Throws an error if called when the experiment is not running.
"""
return self.sketch.workspace.abort()
#
# Subscribers
#
def variables (self):
from octopus.machine import Component
from octopus.data.data import BaseVariable
variables = {}
for name, var in self.sketch.workspace.variables.items():
if isinstance(var, Component):
variables.update(var.variables)
elif isinstance(var, BaseVariable):
variables[name] = var
return variables
find = makeFinder(
Experiment,
'experiments',
{
'guid': { 'type': str },
'sketch_guid': { 'type': str },
'title': {
'type': str,
'modifier': lambda x: '%' + x + '%',
'operator': ' LIKE ?'
},
'user_id': { 'type': int },
'started_date': { 'type': int },
'finished_date': { 'type': int },
'duration': {
'type': int,
'sql': '(finished_date - started_date) AS duration'
},
'deleted': { 'type': bool }
}
)
class CompletedExperiment (object):
def __init__ (self, id):
self.id = id
@defer.inlineCallbacks
def load (self):
expt = yield self._fetchFromDb(self.id)
experimentDir = self._getExperimentDir(self.id, expt['started_date'])
self.title = expt['sketch_title']
self.date = expt['started_date']
self.started_date = expt['started_date']
self.finished_date = expt['finished_date']
self.sketch_id = expt['sketch_guid']
def _varName (name):
if '::' in name:
return '.'.join(name.split('::')[1:])
else:
return name
variables = yield self._getVariables(experimentDir)
self.variables = [
{
"key": v["name"],
"name": _varName(v["name"]),
"type": v["type"],
"unit": v["unit"]
}
for v in variables.values()
if "name" in v
]
@defer.inlineCallbacks
def loadData (self, variables, start, end):
date = yield self._fetchDateFromDb(self.id)
experimentDir = self._getExperimentDir(self.id, date)
storedVariablesData = yield self._getVariables(experimentDir)
data = yield defer.gatherResults(map(
lambda variable: self._getData(
experimentDir.child(variable["file"]),
variable["name"],
variable["type"],
start,
end
),
map(lambda name: storedVariablesData[name], variables)
))
defer.returnValue(data)
@defer.inlineCallbacks
def buildExcelFile (self, variables, time_divisor, time_dp):
import pandas as pd
from io import BytesIO
date = yield self._fetchDateFromDb(self.id)
experimentDir = self._getExperimentDir(self.id, date)
storedVariablesData = yield self._getVariables(experimentDir)
bio = BytesIO()
# https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#writing-excel-files-to-memory
writer = pd.ExcelWriter(bio, engine='xlsxwriter')
def varName (variable):
""" Generates a column title from a variable name """
if variable["unit"] != '':
unit = ' (' + variable["unit"] + ')'
else:
unit = ''
if '::' in variable["name"]:
name = '.'.join(variable["name"].split('::')[1:])
else:
name = variable["name"]
return name + unit
# Read data for each requested variable
cols = yield defer.gatherResults(map(
lambda variable: threads.deferToThread(
pd.read_csv,
experimentDir.child(variable["file"]).path,
comment = '#',
index_col = 0,
usecols = [0, 1],
names = ["Time", varName(variable)]
),
map(lambda name: storedVariablesData[name.decode('ascii')], variables)
))
# Convert the columns into a single DataFrame
dataframe = pd.concat(cols, axis = 1)
# Ensure there is a datapoint at each time point. Fill rather than
# interpolate to maintain greatest data fidelity.
dataframe.fillna(method = 'pad', inplace = True)
# Reduce the number of datapoints according to time_divisor / time_dp
# This is done over the entire dataframe, after filling empty values,
# so that all property values are retained.
def format_time (x):
if x != "":
return round(float(x) / time_divisor, time_dp)
dataframe = dataframe.groupby(format_time).first()
# Remove invalid chars from expt title for Excel sheet title
sheet_title = re.sub(r'[\[\]\*\/\\\?]+', '', self.title)[0:30]
# Generate excel file
dataframe.to_excel(writer, sheet_name = sheet_title)
writer.save()
# Seek to the beginning and read to copy the workbook to a variable in memory
bio.seek(0)
defer.returnValue(bio.read())
def _fetchFromDb (self, id):
def _done (rows):
try:
row = rows[0]
except KeyError:
return None
return {
'guid': str(row[0]),
'sketch_guid': str(row[1]),
'user_id': int(row[2]),
'started_date': int(row[3]),
'finished_date': int(row[4]),
'sketch_title': str(row[5])
}
return Experiment.db.runQuery("""
SELECT guid, sketch_guid, user_id, started_date, finished_date, title
FROM experiments
WHERE guid = ?
""", (id, )).addCallback(_done)
def _fetchDateFromDb (self, id):
def _done (rows):
try:
return int(rows[0][0])
except IndexError:
return None
return Experiment.db.runQuery("""
SELECT started_date
FROM experiments
WHERE guid = ?
""", (id, )).addCallback(_done)
def _getExperimentDir (self, id, startTime):
stime = time.gmtime(startTime)
experimentDir = FilePath(Experiment.dataDir)
for segment in [stime.tm_year, stime.tm_mon, stime.tm_mday, id]:
experimentDir = experimentDir.child(str(segment))
if not experimentDir.exists():
return None
return experimentDir
@defer.inlineCallbacks
def _getVariables (self, experimentDir):
varsFile = experimentDir.child("variables")
try:
content = yield threads.deferToThread(varsFile.getContent)
variables = json.loads(content)
except Exception:
log.err()
variables = {}
defer.returnValue(variables)
@defer.inlineCallbacks
def _getData (self, dataFile, name, var_type, start = None, end = None):
if var_type == "int":
cast = int
elif var_type == "float":
cast = float
else:
cast = str
if end is None:
start = None
def _readFile ():
data = []
with dataFile.open() as fp:
for line in fp:
# Skip comments
if line[0] == 35:
# b'#' == 35
continue
time, value = line.split(b',')
time = float(time)
if start is not None:
if time < start:
continue
if time > end:
break
data.append((time, cast(value.decode())))
return data
try:
data = yield threads.deferToThread(_readFile)
except Exception:
log.err()
defer.returnValue({})
# Make a readable variable name
#var_name = '.'.join(name.split('::')[1:])
if len(data) > 400 and cast in (int, float):
if end is None:
try:
interval = data[-1][0] - data[0][0]
except IndexError:
interval = 0
else:
interval = end - start
spread = max(data, key = lambda x: x[1])[1] - min(data, key = lambda x: x[1])[1]
print ("Simplifying data with interval " + str(interval) + " (currently %s points)" % len(data))
print ("Spread: %s" % spread)
print ("Epsilon: %s" % min(interval / 200., spread / 50.))
data = rdp(data, epsilon = min(interval / 200., spread / 50.))
print (" -> %s points" % len(data))
defer.returnValue({
'name': name,
'type': var_type,
'data': data
})
from math import sqrt
def distance(a, b):
return sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
def point_line_distance(point, start, end):
if (start == end):
return distance(point, start)
else:
n = abs(
(end[0] - start[0]) * (start[1] - point[1]) - (start[0] - point[0]) * (end[1] - start[1])
)
d = sqrt(
(end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2
)
return n / d
def rdp(points, epsilon):
"""
Reduces a series of points to a simplified version that loses detail, but
maintains the general shape of the series.
"""
dmax = 0.0
index = 0
for i in range(1, len(points) - 1):
d = point_line_distance(points[i], points[0], points[-1])
if d > dmax:
index = i
dmax = d
if dmax >= epsilon:
results = rdp(points[:index+1], epsilon)[:-1] + rdp(points[index:], epsilon)
else:
results = [points[0], points[-1]]
return results
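# Illustrative sketch (not part of the original module): downsampling a noisy
# ramp with the Ramer-Douglas-Peucker routine above. The epsilon tolerance is
# assumed to be in the same units as the point coordinates.
if __name__ == '__main__':
    points = [(float(t), 0.1 * t + (0.5 if t % 7 == 0 else 0.0)) for t in range(100)]
    simplified = rdp(points, epsilon = 0.2)
    print("%d points -> %d points" % (len(points), len(simplified)))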
|
richardingham/octopus
|
octopus/blocktopus/experiment.py
|
Python
|
mit
| 19,197
|
[
"Octopus"
] |
7e508557f813f43ae2c82496f06d0d9b7af9d0e776dbf5ff6d372e8e2544bbe6
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Unit tests for writer xyzwriter module."""
import os
import unittest
import cclib
__filedir__ = os.path.dirname(__file__)
__filepath__ = os.path.realpath(__filedir__)
__datadir__ = os.path.join(__filepath__, "..", "..")
class XYZTest(unittest.TestCase):
def setUp(self):
self.XYZ = cclib.io.XYZ
def test_init(self):
"""Does the class initialize correctly?"""
fpath = os.path.join(__datadir__, "data/ADF/basicADF2007.01/dvb_gopt.adfout")
data = cclib.io.ccopen(fpath).parse()
xyz = cclib.io.xyzwriter.XYZ(data)
# The object should keep the ccData instance passed to its constructor.
self.assertEqual(xyz.ccdata, data)
if __name__ == "__main__":
unittest.main()
|
Schamnad/cclib
|
test/io/testxyzwriter.py
|
Python
|
bsd-3-clause
| 940
|
[
"ADF",
"cclib"
] |
468038157364d5011fd7a70bcb100b613ab18a7c26a7dfb2bb13fdcbb8efd227
|
# -*- coding: utf-8 -*-
#
# hl_api_simulation.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for simulation control
"""
from contextlib import contextmanager
from ..ll_api import *
from .hl_api_helper import *
__all__ = [
'Cleanup',
'DisableStructuralPlasticity',
'EnableStructuralPlasticity',
'GetKernelStatus',
'Install',
'Prepare',
'ResetKernel',
'Run',
'RunManager',
'SetKernelStatus',
'Simulate',
]
@check_stack
def Simulate(t):
"""Simulate the network for `t` milliseconds.
Parameters
----------
t : float
Time to simulate in ms
See Also
--------
RunManager
"""
sps(float(t))
sr('ms Simulate')
@check_stack
def Run(t):
"""Simulate the network for `t` milliseconds.
Parameters
----------
t : float
Time to simulate in ms
Notes
------
Call between `Prepare` and `Cleanup` calls, or within a
``with RunManager`` clause.
``Simulate(t)`` is equivalent to: ``t' = t/m; Prepare(); for _ in range(m): Run(t'); Cleanup()``
`Prepare` must be called before `Run` to calibrate the system, and
`Cleanup` must be called after `Run` to close files, cleanup handles, and
so on. After `Cleanup`, `Prepare` can and must be called before more `Run`
calls. Any calls to `SetStatus` between `Prepare` and `Cleanup` have
undefined behaviour.
See Also
--------
Prepare, Cleanup, RunManager, Simulate
"""
sps(float(t))
sr('ms Run')
@check_stack
def Prepare():
"""Calibrate the system before a `Run` call. Not needed for `Simulate`.
Call before the first `Run` call, or before calling `Run` after changing
the system, calling `SetStatus` or `Cleanup`.
See Also
--------
Run, Cleanup
"""
sr('Prepare')
@check_stack
def Cleanup():
"""Cleans up resources after a `Run` call. Not needed for `Simulate`.
Closes state for a series of runs, such as flushing and closing files.
A `Prepare` is needed after a `Cleanup` before any more calls to `Run`.
See Also
--------
Run, Prepare
"""
sr('Cleanup')
@contextmanager
def RunManager():
"""ContextManager for `Run`
Calls `Prepare` before a series of `Run` calls, and calls `Cleanup` at end.
E.g.:
::
with RunManager():
for i in range(10):
Run()
See Also
--------
Prepare, Run, Cleanup, Simulate
"""
Prepare()
try:
yield
finally:
Cleanup()
@check_stack
def ResetKernel():
"""Reset the simulation kernel.
This will destroy the network as well as all custom models created with
:py:func:`.CopyModel`. Calling this function is equivalent to restarting NEST.
In particular,
* all network nodes
* all connections
* all user-defined neuron and synapse models
are deleted, and
* time
* random generators
are reset. The only exception is that dynamically loaded modules are not
unloaded. This may change in a future version of NEST.
"""
sr('ResetKernel')
@check_stack
def SetKernelStatus(params):
"""Set parameters for the simulation kernel.
Parameters
----------
params : dict
Dictionary of parameters to set.
Params dictionary
Some of the keywords in the kernel status dictionary are internally
calculated, and cannot be defined by the user. These are flagged as
`read only` in the parameter list. Use GetKernelStatus to access their
assigned values.
Time and resolution
Parameters
----------
resolution : float
The resolution of the simulation (in ms)
time : float
The current simulation time (in ms)
to_do : int, read only
The number of steps yet to be simulated
max_delay : float
The maximum delay in the network
min_delay : float
The minimum delay in the network
ms_per_tic : float
The number of milliseconds per tic
tics_per_ms : float
The number of tics per millisecond
tics_per_step : int
The number of tics per simulation time step
T_max : float, read only
The largest representable time value
T_min : float, read only
The smallest representable time value
Parallel processing
Parameters
----------
total_num_virtual_procs : int
The total number of virtual processes
local_num_threads : int
The local number of threads
num_processes : int, read only
The number of MPI processes
off_grid_spiking : bool
Whether to transmit precise spike times in MPI communication
grng_seed : int
Seed for global random number generator used synchronously by all
virtual processes to create, e.g., fixed fan-out connections.
rng_seeds : array
Seeds for the per-virtual-process random number generators used for
most purposes. Array with one integer per virtual process, all must
be unique and differ from grng_seed.
MPI buffers
Parameters
----------
adaptive_spike_buffers : bool
Whether MPI buffers for communication of spikes resize on the fly
adaptive_target_buffers : bool
Whether MPI buffers for communication of connections resize on the fly
buffer_size_secondary_events : int, read only
Size of MPI buffers for communicating secondary events (in bytes, per
MPI rank, for developers)
buffer_size_spike_data : int
Total size of MPI buffer for communication of spikes
buffer_size_target_data : int
Total size of MPI buffer for communication of connections
growth_factor_buffer_spike_data : float
If MPI buffers for communication of spikes resize on the fly, grow
them by this factor each round
growth_factor_buffer_target_data : float
If MPI buffers for communication of connections resize on the fly, grow
them by this factor each round
max_buffer_size_spike_data : int
Maximal size of MPI buffers for communication of spikes.
max_buffer_size_target_data : int
Maximal size of MPI buffers for communication of connections
Waveform relaxation method (wfr)
Parameters
----------
use_wfr : bool
Whether to use waveform relaxation method
wfr_comm_interval : float
Desired waveform relaxation communication interval
wfr_tol : float
Convergence tolerance of waveform relaxation method
wfr_max_iterations : int
Maximal number of iterations used for waveform relaxation
wfr_interpolation_order : int
Interpolation order of polynomial used in wfr iterations
Synapses
Parameters
----------
max_num_syn_models : int, read only
Maximal number of synapse models supported
sort_connections_by_source : bool
Whether to sort connections by their source; increases construction
time of presynaptic data structures, decreases simulation time if the
average number of outgoing connections per neuron is smaller than the
total number of threads
structural_plasticity_synapses : dict
Defines all synapses which are plastic for the structural plasticity
algorithm. Each entry in the dictionary is composed of a synapse model,
the pre synaptic element and the postsynaptic element
structural_plasticity_update_interval : int
Defines the time interval in ms at which the structural plasticity
manager will make changes in the structure of the network (creation
and deletion of plastic synapses)
Output
Parameters
----------
data_path : str
A path, where all data is written to (default is the current
directory)
data_prefix : str
A common prefix for all data files
overwrite_files : bool
Whether to overwrite existing data files
print_time : bool
Whether to print progress information during the simulation
network_size : int, read only
The number of nodes in the network
num_connections : int, read only, local only
The number of connections in the network
local_spike_counter : int, read only
Number of spikes fired by neurons on a given MPI rank since NEST was
started or the last ResetKernel. Only spikes from "normal" neurons
(neuron models with proxies) are counted, not spikes generated by
devices such as poisson_generator.
Miscellaneous
Other Parameters
----------------
dict_miss_is_error : bool
Whether missed dictionary entries are treated as errors
keep_source_table : bool
Whether to keep source table after connection setup is complete
See Also
--------
GetKernelStatus
"""
sps(params)
sr('SetKernelStatus')
@check_stack
def GetKernelStatus(keys=None):
"""Obtain parameters of the simulation kernel.
Parameters
----------
keys : str or list, optional
Single parameter name or `list` of parameter names
Returns
-------
dict:
Parameter dictionary, if called without argument
type:
Single parameter value, if called with single parameter name
list:
List of parameter values, if called with list of parameter names
Raises
------
TypeError
If `keys` are of the wrong type.
Notes
-----
See SetKernelStatus for documentation on each parameter key.
See Also
--------
SetKernelStatus
"""
sr('GetKernelStatus')
status_root = spp()
if keys is None:
return status_root
elif is_literal(keys):
return status_root[keys]
elif is_iterable(keys):
return tuple(status_root[k] for k in keys)
else:
raise TypeError("keys should be either a string or an iterable")
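# Illustrative usage sketch (assumes an initialised NEST kernel; the example
# values are typical defaults, not guaranteed):
#
#   GetKernelStatus()                          # full status dictionary
#   GetKernelStatus('resolution')              # single value, e.g. 0.1
#   GetKernelStatus(['resolution', 'to_do'])   # tuple of values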
@check_stack
def Install(module_name):
"""Load a dynamically linked NEST module.
Parameters
----------
module_name : str
Name of the dynamically linked module
Returns
-------
handle
NEST module identifier, required for unloading
Notes
-----
Dynamically linked modules are searched in the NEST library
directory (``<prefix>/lib/nest``) and in ``LD_LIBRARY_PATH`` (on
Linux) or ``DYLD_LIBRARY_PATH`` (on OSX).
**Example**
::
nest.Install("mymodule")
"""
return sr("(%s) Install" % module_name)
@check_stack
def EnableStructuralPlasticity():
"""Enable structural plasticity for the network simulation
See Also
--------
DisableStructuralPlasticity
"""
sr('EnableStructuralPlasticity')
@check_stack
def DisableStructuralPlasticity():
"""Disable structural plasticity for the network simulation
See Also
--------
EnableStructuralPlasticity
"""
sr('DisableStructuralPlasticity')
|
SepehrMN/nest-simulator
|
pynest/nest/lib/hl_api_simulation.py
|
Python
|
gpl-2.0
| 11,564
|
[
"NEURON"
] |
ba2cdca79071153ed2802b3c53b4ee700608094d944bc7462737cc12970e6546
|
import cv2
import numpy as np
COLOR_SWATCH = [
[255, 64, 0], [255, 128, 0], [255, 191, 0],
[255, 255, 0], [191, 255, 0], [128, 255, 0], [64, 255, 0],
[0, 255, 0], [0, 255, 64], [0, 255, 128],
[0, 255, 191], [0, 255, 255], [0, 191, 255],
[0, 128, 255], [0, 64, 255], [0, 0, 255],
[64, 0, 255], [128, 0, 255], [191, 0, 255],
[255, 0, 255], [255, 0, 191], [255, 0, 128],
[255, 0, 64], [255, 0, 0]
]
def getColor(counter=0):
return COLOR_SWATCH[(counter*5) % len(COLOR_SWATCH)]
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
i = 0
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(img, (x1, y1), (x2, y2), getColor(i), thickness)
i = i + 1
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + λ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, λ)
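# Illustrative end-to-end sketch (not part of the original helpers). The image
# path is hypothetical and the numeric parameters are typical starting points,
# not tuned constants; hough_lines assumes at least one line is detected.
if __name__ == '__main__':
    image = cv2.cvtColor(cv2.imread('test.jpg'), cv2.COLOR_BGR2RGB)
    edges = canny(gaussian_blur(grayscale(image), kernel_size=5), 50, 150)
    h, w = edges.shape
    vertices = np.array([[(0, h), (w // 2, h // 2), (w, h)]], dtype=np.int32)
    lines = hough_lines(region_of_interest(edges, vertices),
                        rho=2, theta=np.pi / 180, threshold=15,
                        min_line_len=40, max_line_gap=20)
    result = weighted_img(lines, image)
    cv2.imwrite('result.jpg', cv2.cvtColor(result, cv2.COLOR_RGB2BGR))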
|
jabeerahmed/testrepo
|
term1/Lessons/LaneDetection/Helper.py
|
Python
|
gpl-2.0
| 4,155
|
[
"Gaussian"
] |
41df5150dcc8a74d9b3018a191b3990c8e3ea6de87993e515fa78db1989ba995
|
#!/usr/bin/env python
""" Loads forecast info, fits beta distributions to marginals.
"""
import pandas as pd
import numpy as np
import os
from kernel_regression import KernelRegression
from itertools import izip
__copyright__ = "Copyright 2016, Tue Vissing Jensen"
__credits__ = ["Tue Vissing Jensen"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Tue Vissing Jensen"
__email__ = "tvjens@elektro.dtu.dk"
__status__ = "Prototype"
def get_beta_params(mean, variance):
alpha = mean*(mean*(1-mean)/variance - 1)
beta = (1-mean)*(mean*(1-mean)/variance - 1)
return alpha, beta
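# Moment matching: for X ~ Beta(alpha, beta),
#   E[X]   = alpha / (alpha + beta)
#   Var[X] = alpha * beta / ((alpha + beta)**2 * (alpha + beta + 1))
# Inverting these for a given (mean, variance) yields the expressions above,
# e.g. get_beta_params(0.3, 0.01) gives approximately (6.0, 14.0).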
CATEGORY = 'wind'
CATEGORY = 'solar'  # NB: overrides the line above; set to the category to process
FCNAME = 'fc'
OBSNAME = 'ts'
TESTNODE = 1069
TESTDELTA = '2d 3h'
MIN_VARIANCE = 0.0001
TSVAULTFILE = 'data/TSVault.h5'
AUTOSCALE = True
kreg = KernelRegression()
testx = np.linspace(0, 1, 101)
# Possible gammas
gammas = np.logspace(1, 4, 31)
TEST_GAMMA = 100 # Gaussian width ~ 1/100 = 1% of production
kreg.gamma = TEST_GAMMA
store = pd.HDFStore(TSVAULTFILE)
nodes = store['nodes']
store.close()
outmeandict = {}
outvardict = {}
scalefactors = {}
for node in nodes:
print node
store = pd.HDFStore(TSVAULTFILE)
fcdf = store['/'.join((CATEGORY, FCNAME, node))]
obsdf = store['/'.join((CATEGORY, OBSNAME, node))]
store.close()
if AUTOSCALE:
scalefactor = max((1.0, obsdf.max().max(), fcdf.max().max()))
else:
scalefactor = 1.0
scalefactors[node] = scalefactor
outmeandict[node] = {}
outvardict[node] = {}
# Pull out each node's DFs separately.
for (k1, fcc), (k2, obsc) in izip(fcdf.iteritems(), obsdf.iteritems()):
# print k1
obsc = obsc.dropna()/scalefactor
fcc = fcc.ix[obsc.index]/scalefactor
# Kernelregression Forecasted => mean
kreg.fit(fcc.values.reshape(-1, 1), obsc)
meanpredict = kreg.predict(testx.reshape(-1, 1))
# Select optimal bandwidth
# kreg.gamma = kreg._optimize_gamma(gammas)
# Our predicted mean based on point forecast
# prediction = np.polyval(meanpolycoeff, fcc)
prediction = np.interp(fcc, testx, meanpredict)
# Calculate errors squared
err2 = (prediction - obsc)**2
# Fit variance curve
kreg.fit(fcc.values.reshape(-1, 1), err2)
varpredict = kreg.predict(testx.reshape(-1, 1))
# Select optimal bandwidth
# kreg.gamma = kreg._optimize_gamma(gammas)
# Save the coefficients of the polynomial fit.
outmeandict[node][k1] = meanpredict
outvardict[node][k1] = varpredict
ks = fcdf.columns
intnodes = nodes.map(lambda x: int(x[1:]))
meanpanel = pd.Panel(items=intnodes, major_axis=ks, minor_axis=testx, data=[[outmeandict[n][k] for k in ks] for n in nodes])
varpanel = pd.Panel(items=intnodes, major_axis=ks, minor_axis=testx, data=[[outvardict[n][k] for k in ks] for n in nodes])
meanpanel.major_axis = [pd.Timedelta(x) for x in meanpanel.major_axis]
varpanel.major_axis = [pd.Timedelta(x) for x in varpanel.major_axis]
cpanel = meanpanel.multiply(1-meanpanel).divide(varpanel) - 1
alphapanel = meanpanel.multiply(cpanel)
betapanel = (1-meanpanel).multiply(cpanel)
scalefactorseries = pd.Series(scalefactors)[nodes]
scalefactorseries.index = intnodes
store = pd.HDFStore('data/marginalstore.h5')
store['/'.join((CATEGORY, 'mean'))] = meanpanel
store['/'.join((CATEGORY, 'var'))] = varpanel
store['/'.join((CATEGORY, 'alpha'))] = alphapanel
store['/'.join((CATEGORY, 'beta'))] = betapanel
store['/'.join((CATEGORY, 'scalefactors'))] = scalefactorseries
store.close()
|
TueVJ/RE-Europe_SimpleEnsembles
|
forecast_marginal_estimator.py
|
Python
|
mit
| 3,570
|
[
"Gaussian"
] |
be6b81983e7c1671099cd9fce6a6a0d86a1a2c03b4ff0987e540ad8be1619415
|
import imp
import os
from DIRAC import S_OK, S_ERROR, gConfig, gLogger
from DIRAC.Core.Utilities.CFG import CFG
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals
BASECS = "WebApp"
def loadWebAppCFGFiles():
"""
Load WebApp/web.cfg definitions
"""
exts = []
for ext in CSGlobals.getCSExtensions():
if ext == "DIRAC":
continue
if ext[-5:] != "DIRAC":
ext = "%sDIRAC" % ext
if ext != "WebAppDIRAC":
exts.append( ext )
exts.append( "DIRAC" )
exts.append( "WebAppDIRAC" )
webCFG = CFG()
for modName in reversed( exts ):
try:
modPath = imp.find_module( modName )[1]
except ImportError:
continue
gLogger.verbose( "Found module %s at %s" % ( modName, modPath ) )
cfgPath = os.path.join( modPath, "WebApp", "web.cfg" )
if not os.path.isfile( cfgPath ):
gLogger.verbose( "Inexistant %s" % cfgPath )
continue
try:
modCFG = CFG().loadFromFile( cfgPath )
except Exception, excp:
gLogger.error( "Could not load %s: %s" % ( cfgPath, excp ) )
continue
gLogger.verbose( "Loaded %s" % cfgPath )
expl = [ BASECS ]
while len( expl ):
current = expl.pop( 0 )
if not modCFG.isSection( current ):
continue
if modCFG.getOption( "%s/AbsoluteDefinition" % current, False ):
gLogger.verbose( "%s:%s is an absolute definition" % ( modName, current ) )
try:
webCFG.deleteKey( current )
except:
pass
modCFG.deleteKey( "%s/AbsoluteDefinition" % current )
else:
for sec in modCFG[ current ].listSections():
expl.append( "%s/%s" % ( current, sec ) )
#Add the modCFG
webCFG = webCFG.mergeWith( modCFG )
gConfig.loadCFG( webCFG )
def getRawSchema():
"""
Load the schema from the CS
"""
base = "%s/Schema" % ( BASECS )
schema = []
explore = [ ( "", schema ) ]
while len( explore ):
parentName, parentData = explore.pop( 0 )
fullName = "%s/%s" % ( base, parentName )
result = gConfig.getSections( fullName )
if not result[ 'OK' ]:
continue
sectionsList = result[ 'Value' ]
for sName in sectionsList:
sData = []
parentData.append( ( "%s/%s" % ( parentName, sName ), sData ) )
explore.append( ( sName, sData ) )
result = gConfig.getOptions( fullName )
if not result[ 'OK' ]:
continue
optionsList = result[ 'Value' ]
for opName in optionsList:
opVal = gConfig.getValue( "%s/%s" % ( fullName, opName ) )
if opVal.find( "link|" ) == 0:
parentData.append( ( "link", opName, opVal[5:] ) )
else:
parentData.append( ( "app", opName, opVal ) )
return schema
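# Illustrative traversal sketch (not part of the original module): entries in
# the schema returned by getRawSchema() are either ( "<path>", subEntries )
# section tuples or ( "app"|"link", name, value ) leaf tuples.
def dumpRawSchema( schema, indent = 0 ):
  for entry in schema:
    if len( entry ) == 2:
      print "%s[%s]" % ( " " * indent, entry[0] )
      dumpRawSchema( entry[1], indent + 2 )
    else:
      print "%s%s %s = %s" % ( " " * indent, entry[0], entry[1], entry[2] )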
|
chaen/WebAppDIRAC
|
Core/ConfMgr.py
|
Python
|
gpl-3.0
| 2,773
|
[
"DIRAC"
] |
abdcb9269c21866ea6cba78c5f8ff6616a7573d6cc89b46e7caa4a371d02eaef
|
import ast
import itertools
from ..core.ast_helper import get_call_names
class VarsVisitor(ast.NodeVisitor):
def __init__(self):
self.result = list()
def visit_Name(self, node):
self.result.append(node.id)
def visit_BoolOp(self, node):
for v in node.values:
self.visit(v)
def visit_BinOp(self, node):
self.visit(node.left)
self.visit(node.right)
def visit_UnaryOp(self, node):
self.visit(node.operand)
def visit_Lambda(self, node):
self.visit(node.body)
def visit_IfExp(self, node):
self.visit(node.test)
self.visit(node.body)
self.visit(node.orelse)
def visit_Dict(self, node):
for k in node.keys:
if k is not None:
self.visit(k)
for v in node.values:
self.visit(v)
def visit_Set(self, node):
for e in node.elts:
self.visit(e)
def comprehension(self, node):
self.visit(node.target)
self.visit(node.iter)
for c in node.ifs:
self.visit(c)
def visit_ListComp(self, node):
self.visit(node.elt)
for gen in node.generators:
self.comprehension(gen)
def visit_SetComp(self, node):
self.visit(node.elt)
for gen in node.generators:
self.comprehension(gen)
def visit_DictComp(self, node):
self.visit(node.key)
self.visit(node.value)
for gen in node.generators:
self.comprehension(gen)
# NB: the ast node class for generator expressions is GeneratorExp, so the
# method must be named visit_GeneratorExp for NodeVisitor dispatch to work.
def visit_GeneratorExp(self, node):
self.visit(node.elt)
for gen in node.generators:
self.comprehension(gen)
def visit_Yield(self, node):
if node.value:
self.visit(node.value)
def visit_YieldFrom(self, node):
self.visit(node.value)
def visit_Compare(self, node):
self.visit(node.left)
for c in node.comparators:
self.visit(c)
def visit_Call(self, node):
# This will not visit Flask in Flask(__name__), but it will visit request in `request.args.get()`
if not isinstance(node.func, ast.Name):
self.visit(node.func)
for arg_node in itertools.chain(node.args, node.keywords):
arg = arg_node.value if isinstance(arg_node, ast.keyword) else arg_node
if isinstance(arg, ast.Call):
if isinstance(arg.func, ast.Name):
# We can't just visit because we need to add 'ret_'
self.result.append('ret_' + arg.func.id)
elif isinstance(arg.func, ast.Attribute):
# e.g. html.replace('{{ param }}', param)
# func.attr is replace
# func.value.id is html
# We want replace
self.result.append('ret_' + arg.func.attr)
elif isinstance(arg.func, ast.Call):
self.visit_curried_call_inside_call_args(arg)
else:
raise Exception('Cannot visit vars of ' + ast.dump(arg))
else:
self.visit(arg)
def visit_curried_call_inside_call_args(self, inner_call):
# Curried functions aren't really supported, but we now at least have a defined behaviour.
# In f(g(a)(b)(c)), inner_call is the Call node with argument c
# Try to get the name of curried function g
curried_func = inner_call.func.func
while isinstance(curried_func, ast.Call):
curried_func = curried_func.func
if isinstance(curried_func, ast.Name):
self.result.append('ret_' + curried_func.id)
elif isinstance(curried_func, ast.Attribute):
self.result.append('ret_' + curried_func.attr)
# Visit all arguments except a (ignore the curried function g)
not_curried = inner_call
while not_curried.func is not curried_func:
for arg in itertools.chain(not_curried.args, not_curried.keywords):
self.visit(arg.value if isinstance(arg, ast.keyword) else arg)
not_curried = not_curried.func
def visit_Attribute(self, node):
if not isinstance(node.value, ast.Name):
self.visit(node.value)
else:
self.result.append(node.value.id)
def slicev(self, node):
if isinstance(node, ast.Slice):
if node.lower:
self.visit(node.lower)
if node.upper:
self.visit(node.upper)
if node.step:
self.visit(node.step)
elif isinstance(node, ast.ExtSlice):
if node.dims:
for d in node.dims:
self.visit(d)
else:
self.visit(node.value)
def visit_Subscript(self, node):
if isinstance(node.value, ast.Attribute):
# foo.bar[1]
self.result.append(list(get_call_names(node.value))[0])
self.visit(node.value)
self.slicev(node.slice)
def visit_Starred(self, node):
self.visit(node.value)
def visit_List(self, node):
for el in node.elts:
self.visit(el)
def visit_Tuple(self, node):
for el in node.elts:
self.visit(el)
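# Illustrative usage sketch (not part of the original module): collect the
# variable names read by an expression.
if __name__ == '__main__':
    visitor = VarsVisitor()
    visitor.visit(ast.parse('html.replace(msg, cnt)').body[0].value)
    print(visitor.result)  # ['html', 'msg', 'cnt']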
|
python-security/pyt
|
pyt/helper_visitors/vars_visitor.py
|
Python
|
gpl-2.0
| 5,270
|
[
"VisIt"
] |
4c24a9e5405c1324ff697266828fa2c3242ca43facbf63366886fa4776b10658
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Common functions.
"""
import sys
import logging
import gzip
import numpy as np
import biochem_tools
import os
import fnmatch
__author__ = "David Hoksza"
__email__ = "david.hoksza@mff.cuni.cz"
__license__ = 'X11'
def find_files_recursively(directory, pattern):
matches = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return matches
def open_file(file_name, mode="r"):
access_type = mode
if sys.version_info >= (3,): access_type = mode + "t"
if file_name.endswith("gz"):
return gzip.open(file_name, access_type)
else:
return open(file_name, access_type)
def init_logging():
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s [%(levelname)s] %(module)s - %(message)s',
datefmt='%H:%M:%S')
def to_float(x):
try:
a = float(x)
if np.isinf(a): a = float('nan')
except ValueError:
return float('nan')
else:
return a
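# Example behaviour: to_float('1.5') -> 1.5, to_float('inf') -> nan
# (infinities are mapped to nan), to_float('abc') -> nan.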
def fragments_extraction(ds_file_names, fragment_types):
extraction_options = {
'kekule': False,
'isomeric': False,
'fragments': fragment_types
}
parsed_types = []
for item in fragment_types.split(','):
item_split = item.split('.')
if not len(item_split) == 2:
logging.error('Invalid fragment type: %s', item)
logging.info(' Expected format {TYPE}.{SIZE}')
exit(1)
parsed_types.append({
'name': item_split[0],
'size': int(item_split[1])
})
extraction_options['fragments'] = parsed_types
fn_json = []
for fn in ds_file_names:
file_type = "sdf"
if fn.endswith(".smi"):
file_type = "smi"
if fn.endswith(".smi") or fn.endswith(".sdf"):
fn_json.append(fn[:-4] + ".frags.json")
else:
fn_json.append(fn + ".json")
biochem_tools.extract_fragments([fn], file_type, fn_json[-1], extraction_options)
return fn_json
def descriptors_extraction(json_file_names, descriptors_generator, padel_path):
fn_csv = []
for fn in json_file_names:
fn_csv.append(fn[:-5] + ".csv")
if descriptors_generator == "rdkit":
biochem_tools.rdkit_compute_descriptors(fn, fn_csv[-1], True)
elif descriptors_generator == "padel":
biochem_tools.padel_compute_descriptors(fn, fn_csv[-1], True, padel_path)
return fn_csv
def delete_files(file_names):
for fn in file_names:
os.remove(fn)
|
davidhoksza/bayescreen
|
common.py
|
Python
|
mit
| 2,782
|
[
"RDKit"
] |
4b6fbc881e56320c8f4614e7066f4ff069bed7f8cc4ab8f9aa3cf9df29b8517c
|
"""Bayesian Data Analysis, 3r ed
Chapter 3, demo 1
Visualise the joint density and marginal densities of posterior of normal
distribution with unknown mean and variance.
"""
from __future__ import division
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# import from utilities
import os
util_path = '../utilities_and_data' # provide path to utilities
util_path = os.path.abspath(util_path)
if util_path not in os.sys.path and os.path.exists(util_path):
os.sys.path.insert(0, util_path)
import sinvchi2
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8')
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# data
y = np.array([93, 112, 122, 135, 122, 150, 118, 90, 124, 114])
# sufficient statistics
n = len(y)
s2 = np.var(y, ddof=1) # Here ddof=1 is used to get the sample estimate.
my = np.mean(y)
# Factorize the joint posterior p(mu,sigma2|y) to p(sigma2|y)p(mu|sigma2,y)
# Sample from the joint posterior using this factorization
# sample from p(sigma2|y)
sigma2 = sinvchi2.rvs(n-1, s2, size=1000)
# sample from p(mu|sigma2,y)
mu = my + np.sqrt(sigma2/n)*np.random.randn(*sigma2.shape)
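# i.e. sigma2 ~ Scaled-Inv-chi2(n-1, s2) and mu | sigma2 ~ N(my, sigma2/n),
# so each (mu, sigma2) pair is an exact draw from the joint posterior.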
# display sigma instead of sigma2
sigma = np.sqrt(sigma2)
# For mu compute the density in these points
tl1 = [90, 150]
t1 = np.linspace(tl1[0], tl1[1], 1000)
# For sigma compute the density in these points
tl2 = [10, 60]
t2 = np.linspace(tl2[0], tl2[1], 1000)
# evaluate the joint density in a grid
# note that the following is not normalized, but for plotting
# contours it does not matter
Z = stats.norm.pdf(t1, my, t2[:,np.newaxis]/np.sqrt(n))
Z *= (sinvchi2.pdf(t2**2, n-1, s2)*2*t2)[:,np.newaxis]
# compute the exact marginal density for mu
# multiplication by 1./sqrt(s2/n) is due to the transformation of variable
# z=(x-mean(y))/sqrt(s2/n), see BDA3 p. 21
pm_mu = stats.t.pdf((t1 - my) / np.sqrt(s2/n), n-1) / np.sqrt(s2/n)
# estimate the marginal density for mu using samples and an ad hoc
# Gaussian kernel approximation
pk_mu = stats.gaussian_kde(mu).evaluate(t1)
# compute the exact marginal density for sigma
# multiplication by 2*t2 is due to the transformation of variable
# z=t2^2, see BDA3 p. 21
pm_sigma = sinvchi2.pdf(t2**2, n-1, s2)*2*t2
# N.B. this was already calculated in the joint distribution case
# estimate the marginal density for sigma using samples and an ad hoc Gaussian
# kernel approximation
pk_sigma = stats.gaussian_kde(sigma).evaluate(t2)
# ====== Plotting
# create figure
plotgrid = gridspec.GridSpec(2, 2, width_ratios=[3,2], height_ratios=[3,2])
plt.figure(figsize=(12,12))
# plot the joint distribution
plt.subplot(plotgrid[0,0])
# plot the contour plot of the exact posterior (c_levels is used to give
# a vector of linearly spaced values at which levels contours are drawn)
c_levels = np.linspace(1e-5, Z.max(), 6)[:-1]
plt.contour(t1, t2, Z, c_levels, colors='blue')
# plot the samples from the joint posterior
samps = plt.scatter(mu, sigma, 5, color=[0.25, 0.75, 0.25])
# decorate
plt.xlim(tl1)
plt.ylim(tl2)
plt.xlabel('$\mu$', fontsize=20)
plt.ylabel('$\sigma$', fontsize=20)
plt.title('joint posterior')
plt.legend(
(plt.Line2D([], [], color='blue'), samps),
('exact contour plot', 'samples')
)
# plot the marginal of mu
plt.subplot(plotgrid[1,0])
# empirical
plt.plot(t1, pk_mu, color='#ff8f20', linewidth=2.5, label='empirical')
# exact
plt.plot(t1, pm_mu, 'k--', linewidth=1.5, label='exact')
# decorate
plt.xlim(tl1)
plt.title('marginal of $\mu$')
plt.yticks(())
plt.legend()
# plot the marginal of sigma
plt.subplot(plotgrid[0,1])
# empirical
plt.plot(pk_sigma, t2, color='#ff8f20', linewidth=2.5, label='empirical')
# exact
plt.plot(pm_sigma, t2, 'k--', linewidth=1.5, label='exact')
# decorate
plt.ylim(tl2)
plt.title('marginal of $\sigma$')
plt.xticks(())
plt.legend()
plt.show()
|
lenovor/BDA_py_demos
|
demos_ch3/demo3_1.py
|
Python
|
gpl-3.0
| 3,966
|
[
"Gaussian"
] |
708b025535fabafe93d6c231a4259f8d1a93a3c4d0de0ce52b027ebb83e92715
|
# encoding: utf-8
from woo.dem import *
import woo.core
import woo.dem
import woo.pyderived
import woo.models
import math
from minieigen import *
nan=float('nan')
try:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
except ImportError: pass
class EllGroup(woo.core.Preprocessor,woo.pyderived.PyWooObject):
'Simulation of group of ellipsoids moving in 2-dimensional box.'
_classTraits=None
_PAT=woo.pyderived.PyAttrTrait # less typing
_attrTraits=[
_PAT(Vector2,'rRange',Vector2(.02,.04),unit='m',doc='Range (minimum and maximum) for particle radius (greatest semi-axis); if both are the same, all particles will have the same radius.'),
_PAT(bool,'spheres',False,doc='Use spherical particles instead of elliptical'),
_PAT(float,'semiMinRelRnd',0,hideIf='self.spheres',doc='Minimum semi-axis length relative to particle radius; minor semi-axes are randomly selected from (:obj:`semiMinRelRnd` … 1) × greatest semi-axis. If non-positive, :obj:`semiRelFixed` is used instead.'),
_PAT(Vector3,'semiRelFixed',Vector3(1.,.5,.5),hideIf='self.spheres',doc='Fixed sizes of semi-axes relative (all elements should be ≤ 1). The :math:`z`-component is the out-of-plane size which only indirectly influences contact stiffnesses. This variable is only used if semi-axes are not assigned randomly (see :obj:`semiMinRelRnd`).'),
_PAT(Vector2,'boxSize',Vector2(2,2),unit='m',doc='Size of the 2d domain in which particles move.'),
_PAT(float,'vMax',1.,unit='m/s',doc='Maximum initial velocity of particle; assigned randomly from 0 to this value; intial angular velocity of all particles is zero.'),
_PAT(woo.models.ContactModelSelector,'model',woo.models.ContactModelSelector(name='linear',surfEnergy=4.,restitution=1.,damping=0.01,alpha=.6,numMat=(1,2),matDesc=['ellipsoids','walls'],mats=[woo.dem.FrictMat(density=5000,young=1e6,tanPhi=0.0)]),doc='Select contact model. The first material is for particles; the second, optional, material is for walls at the boundary (the first material is used if there is no second one).'),
_PAT(str,'exportFmt',"/tmp/ell2d-{tid}-",filename=True,doc="Prefix for saving :obj:`woo.dem.VtkExport` data, and :obj:`woo.pre.ell2d.ell2plot` data; formatted with ``format()`` providing :obj:`woo.core.Scene.tags` as keys."),
_PAT(int,'vtkStep',0,"How often should :obj:`woo.dem.VtkExport` run. If non-positive, never run the export."),
_PAT(int,'vtkEllLev',1,'Tesselation level of ellipsoids when expored as VTK meshes (see :obj:`woo.dem.VtkExport.ellLev`).'),
_PAT(int,'ell2Step',0,"How often should :obj:`woo.pre.ell2d.ell2plot` run. If non-positive, never run that one."),
_PAT(float,'dtSafety',.5,'Safety coefficient for critical timestep; should be smaller than one.'),
]
def __init__(self,**kw):
woo.core.Preprocessor.__init__(self)
self.wooPyInit(self.__class__,woo.core.Preprocessor,**kw)
def __call__(self):
import woo
# preprocessor builds the simulation when called
pre=self # more readable
S=woo.core.Scene(fields=[woo.dem.DemField()],pre=self.deepcopy())
# material definitions
ellMat=pre.model.mats[0]
wallMat=(pre.model.mats[1] if len(pre.model.mats)>1 else ellMat)
ZZ=pre.rRange[1]*3;
# only generate spheres randomly in 2d box
S.engines=[woo.dem.BoxInlet2d(axis=2,box=((0,0,ZZ),(pre.boxSize[0],pre.boxSize[1],ZZ)),materials=[ellMat],generator=woo.dem.MinMaxSphereGenerator(dRange=2*pre.rRange),massRate=0),woo.dem.InsertionSortCollider([woo.dem.Bo1_Sphere_Aabb()])]
S.one()
posRad=[(p.pos,p.shape.radius) for p in S.dem.par]
# clear the dem field
S.fields=[woo.dem.DemField()]
import random
def rndOri2d():
q=Quaternion((0,0,1),2*math.pi*random.random()); q.normalize(); return q
S.energy['kin0']=0
for pos,rad in posRad:
if not pre.spheres:
if pre.semiMinRelRnd>0: semiAxes=[random.uniform(pre.semiMinRelRnd,1)*rad for i in (0,1,2)]
else: semiAxes=Vector3(pre.semiRelFixed)*rad
p=woo.utils.ellipsoid(center=pos,semiAxes=semiAxes,ori=rndOri2d(),mat=ellMat)
else: p=woo.utils.sphere(center=pos,radius=rad,mat=ellMat)
p.vel=rndOri2d()*Vector3(pre.vMax*random.random(),0,0)
S.energy['kin0']-=p.Ek
p.blocked='zXY'
S.dem.par.add(p)
#for coord,axis,sense in [(0,0,+1),(pre.boxSize[0],0,-1),(0,1,+1),(pre.boxSize[1],1,-1)]:
# S.dem.par.add(woo.utils.wall(coord,axis=axis,sense=sense,mat=wallMat,visible=False))
S.dem.par.add([
woo.dem.Wall.make(0,axis=0,mat=wallMat,visible=True),
woo.dem.Wall.make(0,axis=1,mat=wallMat,visible=True)
])
S.periodic=True
S.cell.setBox((pre.boxSize[0],pre.boxSize[1],2*ZZ))
S.engines=woo.dem.DemField.minimalEngines(model=pre.model,dynDtPeriod=10)+[
# trace particles and color by z-angVel
woo.dem.Tracer(num=100,compress=4,compSkip=1,glSmooth=True,glWidth=2,scalar=woo.dem.Tracer.scalarAngVel,vecAxis=2,stepPeriod=40,minDist=pre.rRange[0]),
woo.core.PyRunner(100,'S.plot.addData(i=S.step,t=S.time,total=S.energy.total(),relErr=(S.energy.relErr() if S.step>100 else 0),**S.energy)'),
woo.dem.VtkExport(stepPeriod=pre.vtkStep,out=pre.exportFmt,ellLev=pre.vtkEllLev,dead=(pre.vtkStep<=0)),
woo.core.PyRunner(pre.ell2Step,'import woo.pre.ell2d; mx=woo.pre.ell2d.ell2plot(out="%s-%05d.png"%(S.expandTags(S.pre.exportFmt),engine.nDone),S=S,colorRange=(0,S.lab.maxEll2Color),bbox=((0,0),S.pre.boxSize)); S.lab.maxEll2Color=max(mx,S.lab.maxEll2Color)',dead=(pre.ell2Step<=0)),
woo.dem.WeirdTriaxControl(goal=(-0,-0,0),stressMask=0,maxStrainRate=(.1,.1,0),mass=1.,label='triax'),
]
S.lab.leapfrog.kinSplit=True
S.dtSafety=pre.dtSafety
S.trackEnergy=True
S.uiBuild='import woo.pre.ell2d; woo.pre.ell2d.ellGroupUiBuild(S,area)'
S.lab.maxEll2Color=0. # max |angVel| for the start when plotting
S.plot.plots={'i':('total','**S.energy'),' t':('relErr')}
S.plot.data={'i':[nan],'total':[nan],'relErr':[nan]} # to make plot displayable from the very start
try:
import woo.gl
S.gl.renderer(iniUp=(0,1,0),iniViewDir=(0,0,-1),grid=4)
except ImportError: pass
return S
def ell2plot(out,S,bbox,colorRange,colorBy='angVel',**kw):
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.collections, matplotlib.patches
import numpy
import math
import woo.dem
from minieigen import Vector2, Vector3
fig=Figure()
ax=fig.add_subplot(1,1,1)
canvas=FigureCanvasAgg(fig)
patches,colors=[],[]
def flat(v): return Vector2(v[0],v[1])
for p in S.dem.par:
if not isinstance(p.shape,woo.dem.Ellipsoid): continue
# project rotation onto the z-axis
rotAxis,rotAngle=p.ori.toAxisAngle()
if abs(rotAxis.dot(Vector3.UnitZ))<0.99: raise ValueError("Ellipsoid rotated other than along the z-axis?")
if rotAxis.dot(Vector3.UnitZ)<0: rotAngle*=-1 # rotation along -z = - rotation along +z
patches.append(matplotlib.patches.Ellipse(xy=flat(p.pos),width=2*p.shape.semiAxes[0],height=2*p.shape.semiAxes[1],angle=math.degrees(rotAngle)))
if colorBy=='angVel': colors.append(abs(p.angVel[2]))
elif colorBy=='vel': colors.append(p.vel[2])
else: raise ValueError('colorBy must be one of "angVel", "vel" (not %s)'%colorBy)
coll=matplotlib.collections.PatchCollection(patches,cmap=matplotlib.cm.jet,alpha=.9)
coll.set_array(numpy.array(colors))
ax.add_collection(coll)
# cbar=fig.colorbar(coll)
coll.set_clim(*colorRange)
ax.grid(True)
ax.set_xlim(bbox[0][0],bbox[1][0])
ax.set_ylim(bbox[0][1],bbox[1][1])
ax.set_aspect('equal')
fig.savefig(out)
return max(colors)
def ellGroupUiBuild(S,area):
grid=QGridLayout(area); grid.setSpacing(0); grid.setMargin(0)
bHalf=QPushButton('Halve Ek')
bDouble=QPushButton('Double Ek')
boxEps=QDoubleSpinBox()
boxEps.setValue(0.)
boxEps.setSingleStep(0.1)
boxEps.setRange(-2.,1.)
grid.addWidget(bHalf,0,0)
grid.addWidget(bDouble,0,1)
grid.addWidget(QLabel('goal size'),1,0)
grid.addWidget(boxEps,1,1)
grid.boxEps=boxEps
def ekAdjust(S,coeff,grid):
with S.paused():
for n in S.dem.nodes:
n.dem.vel*=coeff**2
n.dem.angVel=n.dem.angVel*coeff**2
def epsChange(S,eps,grid):
S.lab.triax.goal=(eps,eps,0)
bHalf.clicked.connect(lambda: ekAdjust(S,.5,grid))
bDouble.clicked.connect(lambda: ekAdjust(S,2.,grid))
boxEps.valueChanged.connect(lambda eps: epsChange(S,eps,grid))
def uiRefresh(grid,S,area):
# update updatable stuff
if S.lab.triax.goal[0]!=grid.boxEps.value(): grid.boxEps.setValue(S.lab.triax.goal[0])
grid.refreshTimer=QTimer(grid)
grid.refreshTimer.timeout.connect(lambda: uiRefresh(grid,S,area))
grid.refreshTimer.start(500)
|
eudoxos/woodem
|
py/pre/ell2d.py
|
Python
|
gpl-2.0
| 8,521
|
[
"VTK"
] |
9460c80c632c4c59cd116426515deabf8cf6e9dbb33870edebe6370b40e384db
|
import os
import sys
import numpy as np
import fitsio
from astrometry.util.fits import *
def floatcompress(fn, truncbits):
I,hdr = fitsio.read(fn, header=True)
print I.shape, I.dtype
assert(I.dtype == np.float32)
F = np.zeros(I.shape, np.uint32)
F.data[:] = I.data
# IEEE -- ASSUME little-endian
signbit = (1 << 31)
expbits = (0xff << 23)
manbits = 0x7fffff
# print 'sign %08x' % signbit
# print 'exp %08x' % expbits
# print 'man %08x' % manbits
# print '0x%x' % (signbit + expbits + manbits)
signvals = (F & signbit)
expvals = (F & expbits)
manvals = (F & manbits)
# mad = np.median(np.abs(I[:-5:10,:-5:10] - I[5::10,5::10]).ravel())
# # convert to Gaussian -- sqrt(2) because our diffs are the
# # differences of deviations of two pixels.
# sig1 = 1.4826 * mad / np.sqrt(2.)
# print 'sigma', sig1
keepbits = np.uint32(0xffffffff - ((1 << truncbits)-1))
#print 'keepbits', keepbits
F[:] = (signvals | expvals | (manvals & keepbits))
#approx = np.empty_like(I)
#approx.data[:] = F.data
I.data[:] = F.data
approx = I
#diff = I - approx
# print 'Truncbits:', truncbits
# print 'abs max diff:', np.max(np.abs(diff))
# print 'mean abs diff:', np.mean(np.abs(diff))
# print 'abs max relative diff:', np.max(np.abs(diff[I != 0] / I[I != 0]))
# print 'mean abs diff:', np.mean(np.abs(diff[I != 0] / I[I != 0]))
# print 'sigma:', sig1
return approx, hdr
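# Note (editorial): with truncbits=11 the low 11 of the 23 IEEE-754 float32
# mantissa bits are zeroed, leaving ~12 significant mantissa bits (about 3-4
# decimal digits); the resulting runs of zero bits compress well under gzip.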
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--start', type=int, default=0, help='Start at row number...')
parser.add_argument('-n', type=int, default=0, help='Run this many rows...')
parser.add_argument('--dry-run', dest='dryrun', action='store_true')
parser.add_argument('--skip-existing', dest='skip', action='store_true')
parser.add_argument('-b', dest='bands', action='append', type=int, default=[],
help='Add WISE band (default: 1,2)')
parser.add_argument('tiles', metavar='tile', type=str, nargs='+',
help='Individual tiles to run')
opt = parser.parse_args()
if len(opt.bands) == 0:
opt.bands = [1,2,3,4]
# Allow specifying bands like "123"
bb = []
for band in opt.bands:
for s in str(band):
bb.append(int(s))
opt.bands = bb
#indirs = ['data/unwise', 'data/unwise-nersc']
#outdir = 'data/unwise-comp-4'
#indirs = ['data/unwise',]
indirs = ['data/unwise-4', 'data/unwise', 'data/unwise-nersc']
outdir = 'data/unwise-comp'
bands = opt.bands
# (pattern, drop-bits, compress)
pats = [('unwise-%s-w%i-img-m.fits', 0, False),
('unwise-%s-w%i-img-u.fits', 0, False),
('unwise-%s-w%i-invvar-m.fits', 11, True),
('unwise-%s-w%i-invvar-u.fits', 11, True),
('unwise-%s-w%i-std-m.fits', 11, True),
('unwise-%s-w%i-std-u.fits', 11, True),
('unwise-%s-w%i-n-m.fits', 0, True),
('unwise-%s-w%i-n-u.fits', 0, True),
('unwise-%s-w%i-frames.fits', 0, False),
]
gzargs = ''
if len(opt.tiles):
tiles = opt.tiles
else:
T = fits_table('allsky-atlas.fits')
T.cut(np.argsort(T.coadd_id))
tiles = T.coadd_id
if opt.start:
T = T[opt.start:]
if opt.n:
T = T[:opt.n]
for i,coadd in enumerate(tiles):
for band in bands:
print
print 'Row', opt.start + i, 'coadd', coadd, 'band', band
print
outpath = os.path.join(outdir, coadd[:3], coadd)
if not os.path.exists(outpath):
os.makedirs(outpath)
# Skip inputs that don't exist
exists = False
fns = []
for indir in indirs:
fn = os.path.join(indir, coadd[:3], coadd, pats[0][0] % (coadd, band))
if os.path.exists(fn):
exists = True
break
fns.append(fn)
if not exists:
print 'Input file does not exist:', fns
continue
maskdir = 'unwise-%s-w%i-mask' % (coadd, band)
for indir in indirs:
fn = maskdir + '.tgz'
fn = os.path.join(indir, coadd[:3], coadd, fn)
print 'Checking', fn
if os.path.exists(fn):
print 'exists'
break
fn = maskdir
fn = os.path.join(indir, coadd[:3], coadd, fn)
print 'Checking', fn
if os.path.exists(fn):
print 'exists'
break
assert(os.path.exists(fn))
outfn = os.path.join(outpath, maskdir + '.tgz')
absoutfn = os.path.abspath(outfn)
if not fn.endswith('.tgz'):
# tar
dirfn = os.path.dirname(fn)
cmd = '(cd %s && tar czf %s %s)' % (dirfn, absoutfn, maskdir)
else:
# copy
cmd = 'cp %s %s' % (fn, outfn)
if opt.skip and os.path.exists(outfn):
print 'Already exists:', outfn
continue
print cmd
if not opt.dryrun:
rtn = os.system(cmd)
if rtn:
sys.exit(rtn)
for pat, truncbits, compress in pats:
for indir in indirs:
fn = os.path.join(indir, coadd[:3], coadd, pat % (coadd, band))
print 'Looking for', fn
if os.path.exists(fn):
break
assert(os.path.exists(fn))
outfn = os.path.join(outpath, pat % (coadd, band))
print 'Writing to', outfn
if truncbits:
data,hdr = floatcompress(fn, truncbits)
hdr.add_record(dict(name='UNW_TRNC', value=truncbits,
comment='Floating-point bits truncated'))
if not opt.dryrun:
fitsio.write(outfn, data, header=hdr, clobber=True)
cmd = 'gzip -f %s %s' % (gzargs, outfn)
elif compress:
cmd = 'gzip -f %s -c %s > %s.gz' % (gzargs, fn, outfn)
else:
cmd = 'cp %s %s' % (fn, outfn)
print cmd
if not opt.dryrun:
rtn = os.system(cmd)
if rtn:
sys.exit(rtn)
|
dstndstn/unwise-coadds
|
merge.py
|
Python
|
gpl-2.0
| 6,712
|
[
"Gaussian"
] |
bb8a16506860c686a6d8bc689e149c231fce098651b64431cf751f43a779d3e9
|
'''Term class hierarchy.'''
# pylint: disable-msg=W0142
from aterm import types
from aterm import compare
from aterm import hash
from aterm import write
from aterm import lists
class Term(object):
'''Base class for all terms.
Terms are non-modifiable. Changes are carried out by returning another term
instance.
'''
# NOTE: most methods defer the execution to visitors
__slots__ = ['factory']
def __init__(self, factory):
self.factory = factory
# XXX: this has a large inpact in performance
if __debug__ and False:
def __setattr__(self, name, value):
'''Prevent modification of term attributes.'''
# TODO: implement this with a metaclass
try:
object.__getattribute__(self, name)
except AttributeError:
object.__setattr__(self, name, value)
else:
raise AttributeError("attempt to modify read-only term attribute '%s'" % name)
def __delattr__(self, name):
'''Prevent deletion of term attributes.'''
raise AttributeError("attempt to delete read-only term attribute '%s'" % name)
def getType(self):
'''Gets the type of this term.'''
return self.type
def getHash(self):
'''Generate a hash value for this term.'''
return hash.fullHash(self)
def getStructuralHash(self):
'''Generate a hash value for this term.
Annotations are not taken into account.
'''
return hash.structuralHash(self)
__hash__ = getStructuralHash
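    # NOTE: __hash__ uses the structural hash so that terms which compare
    # equal via __eq__ (structural equivalence, ignoring annotations) also
    # hash equally when used as dict keys or set members.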
def isEquivalent(self, other):
'''Checks for structural equivalence of this term agains another term.'''
return compare.isEquivalent(self, other)
def isEqual(self, other):
'''Checks equality of this term against another term. Note that for two
terms to be equal, any annotations they might have must be equal as
well.'''
return compare.isEqual(self, other)
def __eq__(self, other):
if not isinstance(other, Term):
# TODO: produce a warning
return False
return compare.isEquivalent(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def rmatch(self, other):
'''Matches this term against a string pattern.'''
return self.factory.match(other, self)
def accept(self, visitor, *args, **kargs):
'''Accept a visitor.'''
raise NotImplementedError
def writeToTextFile(self, fp):
'''Write this term to a file object.'''
writer = write.TextWriter(fp)
writer.visit(self)
def __str__(self):
'''Get the string representation of this term.'''
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
fp = StringIO()
self.writeToTextFile(fp)
return fp.getvalue()
def __repr__(self):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
fp = StringIO()
writer = write.AbbrevTextWriter(fp, 3)
try:
writer.visit(self)
except:
fp.write('...<error>')
return '<Term %s>' % (fp.getvalue(),)
class Lit(Term):
'''Base class for literal terms.'''
__slots__ = ['value']
def __init__(self, factory, value):
Term.__init__(self, factory)
self.value = value
def getValue(self):
return self.value
class Integer(Lit):
'''Integer literal term.'''
__slots__ = []
type = types.INT
def __init__(self, factory, value):
if not isinstance(value, (int, long)):
raise TypeError('value is not an integer', value)
Lit.__init__(self, factory, value)
def __int__(self):
return int(self.value)
def accept(self, visitor, *args, **kargs):
return visitor.visitInt(self, *args, **kargs)
class Real(Lit):
'''Real literal term.'''
__slots__ = []
type = types.REAL
def __init__(self, factory, value):
if not isinstance(value, float):
raise TypeError('value is not a float', value)
Lit.__init__(self, factory, value)
def __float__(self):
return float(self.value)
def accept(self, visitor, *args, **kargs):
return visitor.visitReal(self, *args, **kargs)
class Str(Lit):
'''String literal term.'''
__slots__ = []
type = types.STR
def __init__(self, factory, value):
if not isinstance(value, str):
raise TypeError('value is not a string', value)
Lit.__init__(self, factory, value)
def accept(self, visitor, *args, **kargs):
return visitor.visitStr(self, *args, **kargs)
class List(Term):
'''Base class for list terms.'''
__slots__ = []
# Python's list compatability methods
def __nonzero__(self):
return not lists.empty(self)
def __len__(self):
return lists.length(self)
def __getitem__(self, index):
return lists.item(self, index)
def __iter__(self):
return lists.Iter(self)
def insert(self, index, element):
return lists.insert(self, index, element)
def append(self, element):
return lists.append(self, element)
def extend(self, other):
return lists.extend(self, other)
def reverse(self):
return lists.reverse(self)
def accept(self, visitor, *args, **kargs):
return visitor.visitList(self, *args, **kargs)
class Nil(List):
'''Empty list term.'''
__slots__ = []
type = types.NIL
def __init__(self, factory):
List.__init__(self, factory)
def accept(self, visitor, *args, **kargs):
return visitor.visitNil(self, *args, **kargs)
class Cons(List):
'''Concatenated list term.'''
__slots__ = ['head', 'tail']
type = types.CONS
def __init__(self, factory, head, tail):
List.__init__(self, factory)
if not isinstance(head, Term):
raise TypeError("head is not a term", head)
self.head = head
if not isinstance(tail, List):
raise TypeError("tail is not a list term", tail)
self.tail = tail
def accept(self, visitor, *args, **kargs):
return visitor.visitCons(self, *args, **kargs)
class Appl(Term):
'''Application term.'''
__slots__ = ['name', 'args', 'annotations']
type = types.APPL
def __init__(self, factory, name, args, annotations):
Term.__init__(self, factory)
if not isinstance(name, basestring):
raise TypeError("name is not a string", name)
self.name = name
self.args = tuple(args)
for arg in self.args:
if not isinstance(arg, Term):
raise TypeError("arg is not a term", arg)
if not isinstance(annotations, List):
raise TypeError("annotations is not a list", annotations)
self.annotations = annotations
def getArity(self):
return len(self.args)
def setAnnotations(self, annotations):
'''Return a copy of this term with the given annotations.'''
return self.factory.makeAppl(self.name, self.args, annotations)
def removeAnnotations(self):
'''Return a copy of this term with all annotations removed.'''
return self.factory.makeAppl(self.name, self.args)
def accept(self, visitor, *args, **kargs):
return visitor.visitAppl(self, *args, **kargs)
|
mewbak/idc
|
aterm/term.py
|
Python
|
lgpl-2.1
| 6,584
|
[
"VisIt"
] |
e581651f0c27797fc9af61ce01db0fb7f2d9cdb1f618a63c492b48c3414db683
|
# Clustering.py
#
# Cluster segments
# Version 3.0 14/09/20
# Authors: Stephen Marsland, Nirosha Priyadarshani, Julius Juodakis, Virginia Listanti
# AviaNZ bioacoustic analysis program
# Copyright (C) 2017--2020
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import random
import os, wavio
import librosa
import WaveletSegment
import WaveletFunctions
import SignalProc
import Segment
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.mixture import GaussianMixture
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import DBSCAN
from sklearn.cluster import Birch
from sklearn.cluster import SpectralClustering
from sklearn.cluster import MeanShift
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import AffinityPropagation
# from sklearn.cluster import OPTICS
# from sklearn import cluster_optics_dbscan
from sklearn import metrics
from sklearn.manifold import TSNE
from statistics import mode
from sklearn.metrics.pairwise import pairwise_distances
class Clustering:
# This class implements various clustering algorithms and performance measures for the AviaNZ interface
# Based on scikit-learn
def __init__(self, features, labels, nclusters):
if not features == []:
features = StandardScaler().fit_transform(features)
self.features = features
self.targets = labels
self.n_clusters = nclusters
def custom_dist(self, x, y):
d, _ = librosa.sequence.dtw(x, y, metric='euclidean')
return d[-1, -1]  # total alignment cost at the end of the warping path
def clusteringScore1(self, labels_true, labels):
""" Evaluate clustering performance using different scores when ground truth labels are present.
"""
arc = self.adjustedRandScore(labels_true, labels)
ami = self.adjustedMutualInfo(labels_true, labels)
h = self.homogeneityScore(labels_true, labels)
c = self.completenessScore(labels_true, labels)
v = self.vMeasureScore(labels_true, labels)
return arc, ami, h, c, v
def clusteringScore2(self, features, labels):
""" Evaluate clustering performance using different scores when ground truth labels are NOT present.
"""
sc = self.silhouetteCoef(features, labels)
return sc
def homogeneityScore(self, labels_true, labels):
""" Homogeneity: each cluster contains only members of a single class.
score - between 0.0 and 1.0.
1.0 perfectly homogeneous
"""
hs = metrics.homogeneity_score(labels_true, labels)
print("Homogeneity: %0.3f" % hs)
return hs
def completenessScore(self, labels_true, labels):
""" Completeness: all members of a given class are assigned to the same cluster.
score - between 0.0 and 1.0.
1.0 perfectly complete
"""
cs = metrics.completeness_score(labels_true, labels)
print("Completeness: %0.3f" % cs)
return cs
def vMeasureScore(self, labels_true, labels):
""" V-measure is the harmonic mean between homogeneity and completeness.
score - between 0.0 and 1.0.
1.0 perfectly complete labeling
"""
vs = metrics.v_measure_score(labels_true, labels)
print("V-measure: %0.3f" % vs)
return vs
def adjustedRandScore(self, labels_true, labels):
""" Measures the similarity of the two assignments, ignoring permutations and with chance normalization.
score - between -1.0 and 1.0.
Random labelings will have score close to 0.0.
1.0 perfect match.
"""
ari = metrics.adjusted_rand_score(labels_true, labels)
print("Adjusted Rand Index: %0.3f" % ari)
return ari
def adjustedMutualInfo(self, labels_true, labels):
""" Adjusted Mutual Information between two clusterings. Measures the agreement of the two assignments,
ignoring permutations.
        score <= 1.0.
1.0 perfect match.
"""
ami = metrics.adjusted_mutual_info_score(labels_true, labels)
print("Adjusted Mutual Information: %0.3f" % ami)
return ami
def silhouetteCoef(self, features, labels):
""" When the ground truth labels are not present.
Mean Silhouette Coefficient of all samples.
Calculated using the mean intra-cluster distance and the mean nearest-cluster distance for each
sample.
score - between -1.0 and 1.0 (perfect).
score close to zero: overlapping clusters.
negative score: a sample has been assigned to the wrong cluster, as a different cluster is more similar.
"""
sc = metrics.silhouette_score(features, labels)
print("Silhouette Coefficient: %0.3f" % sc)
return sc
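    # Hedged usage sketch (toy values, added for illustration): the
    # label-based scores need nothing but two label sequences, e.g.
    #   c = Clustering([], [], nclusters=0)
    #   c.clusteringScore1([0, 0, 1, 1], [1, 1, 0, 0])
    # returns (1.0, 1.0, 1.0, 1.0, 1.0), because all five measures are
    # invariant to a pure relabelling of the clusters.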
def kMeans(self, init='k-means++', n_clusters=8, n_init=10):
""" K-Means clustering.
Useful when: general-purpose, even cluster size, flat geometry, not too many clusters.
"""
model = KMeans(init=init, n_clusters=n_clusters, n_init=n_init)
model.fit(self.features)
return model
def miniBatchKmeans(self, n_clusters=8, init='k-means++', max_iter=100, batch_size=25):
""" Variant of the K-Means algorithm, uses mini-batches to reduce the computation time.
"""
model = MiniBatchKMeans(n_clusters=n_clusters, init=init, max_iter=max_iter, batch_size=batch_size)
model.fit(self.features)
return model
def meanShift(self):
""" A sliding-window-based algorithm that attempts to find dense areas of data points.
Usecase: many clusters, uneven cluster size, non-flat geometry.
"""
model = MeanShift()
model.fit(self.features)
return model
# def DBscan(self, eps=0.5, min_samples=5, metric='euclidean'):
def DBscan(self, eps=0.5, min_samples=5):
""" Density-Based Spatial Clustering of Applications with Noise. An extension to mean shift clustering.
Finds core samples of high density and expands clusters from them.
Usecase: non-flat geometry, uneven cluster sizes
"""
# model = DBSCAN(eps=eps, min_samples=min_samples, metric=metric)
# model = DBSCAN(eps=eps, min_samples=min_samples, metric=self.custom_dist)
        model = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed')
d = pairwise_distances(self.features, self.features, metric=self.custom_dist)
# model.fit(self.features)
model.fit(d)
return model
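    # Hedged note (added): with metric='precomputed' the eps threshold is
    # applied to the DTW costs in `d`, not to Euclidean feature distances,
    # so eps must be tuned on the scale of those DTW values. Illustrative
    # call, assuming the instance holds a non-empty feature matrix:
    #   model = clustering.DBscan(eps=0.3, min_samples=3)
    #   print(model.labels_)   # label -1 marks noise points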
def birch(self, threshold=0.5, branching_factor=50, n_clusters=3, compute_labels=True, copy=True):
""" Builds a tree called the Characteristic Feature Tree (CFT) for the given data. The data is essentially lossy
compressed to a set of Characteristic Feature nodes (CF Nodes).
Usecase: large dataset, outlier removal, data reduction
"""
model = Birch(threshold=threshold, branching_factor=branching_factor, n_clusters=n_clusters,
compute_labels=compute_labels, copy=copy)
model.fit(self.features)
return model
def spectralClustering(self, n_clusters=8, eigen_solver=None, random_state=None, n_init=10, gamma=1.0,
affinity='rbf', n_neighbors=10, eigen_tol=0.0, assign_labels='kmeans', degree=3,
coef0=1, kernel_params=None, n_jobs=None):
""" Requires the number of clusters to be specified. Good for small number of classes.
Usecase: few clusters, even cluster size, non-flat geometry.
"""
model = SpectralClustering(n_clusters=n_clusters, eigen_solver=eigen_solver, random_state=random_state,
n_init=n_init, gamma=gamma, affinity=affinity, n_neighbors=n_neighbors,
eigen_tol=eigen_tol, assign_labels=assign_labels, degree=degree, coef0=coef0,
kernel_params=kernel_params, n_jobs=n_jobs)
model.fit(self.features)
return model
def agglomerativeClustering(self, n_clusters=3, distance_threshold=None, linkage='ward', affinity='euclidean',
compute_full_tree=False):
""" A Hierarchical clustering using a bottom up approach: each observation starts in its own cluster, and
clusters are successively merged together.
Usecase: many clusters, possibly connectivity constraints, non Euclidean distances.
"""
model = AgglomerativeClustering(n_clusters=n_clusters, distance_threshold=distance_threshold, linkage=linkage,
affinity=affinity, compute_full_tree=compute_full_tree)
d = pairwise_distances(self.features, self.features, metric=self.custom_dist)
model.fit(d)
# model.fit(self.features)
return model
def GMM(self, n_components=3, covariance_type='full', tol=0.001, reg_covar=1e-06, max_iter=100, n_init=1,
init_params='kmeans'):
""" Gaussian mixture model. Not scalable.
Usecase: flat geometry, good for density estimation.
"""
model = GaussianMixture(n_components=n_components, covariance_type=covariance_type, tol=tol,
reg_covar=reg_covar, max_iter=max_iter, n_init=n_init, init_params=init_params)
model.fit(self.features)
model.labels_ = model.predict(self.features)
return model
def affinityPropagation(self, damping=0.5, max_iter=200, convergence_iter=15):
""" Affinity Propagation.
Usecase: many clusters, uneven cluster size, non-flat geometry.
"""
model = AffinityPropagation(damping=damping, max_iter=max_iter, convergence_iter=convergence_iter)
model.fit(self.features)
return model
def som(self, mapsize):
""" Self Organising Map
"""
import sompy
som = sompy.SOMFactory.build(self.features, [], mask=None, mapshape='planar', lattice='rect', normalization='var',
initialization='pca', neighborhood='gaussian', training='batch', name='sompy')
som.train()
return som
# def cluster(self, dirname, fs, species=None, feature='we', n_mels=24, minlen=0.2, denoise=False, alg='agglomerative'):
def cluster(self, dirname, fs, species=None, feature='we', n_mels=24, minlen=0.2, denoise=False,
alg='agglomerative'):
"""
Cluster segments during training to make sub-filters.
Given wav + annotation files,
        1) identify syllables using median clipping / FIR
        2) make them fixed-length by padding or clipping
        3) use existing clustering algorithms
        :param dirname: path to directory with wav & wav.data files
:param fs: sample rate
:param species: string, optional. will train on segments containing this label
:param feature: 'we' (wavelet energy), 'mfcc', or 'chroma'
:param n_mels: number of mel coeff when feature='mfcc'
:param minlen: min syllable length in secs
:param denoise: True/False
:param alg: algorithm to use, default to agglomerative
:return: clustered segments - a list of lists [[file1, seg1, [syl1, syl2], [features1, features2], predict], ...]
fs, nclasses, syllable duration (median)
"""
self.alg = alg
nlevels = 6
weInds = []
# 1. Get the frequency band and sampling frequency from annotations
f1, f2 = self.getFrqRange(dirname, species, fs)
print("Clustering using sampling rate", fs)
# 2. Find the lower and upper bounds (relevant to the frq range)
if feature == 'mfcc' and f1 != 0 and f2 != 0:
mels = librosa.core.mel_frequencies(n_mels=n_mels, fmin=0.0, fmax=fs / 2, htk=False)
ind_flow = (np.abs(mels - f1)).argmin()
ind_fhigh = (np.abs(mels - f2)).argmin()
elif feature == 'we' and f1 != 0 and f2 != 0:
weInds = self.nodesInRange(nlevels, f1, f2, fs)
# 3. Clustering at syllable level, therefore find the syllables in each segment
dataset = self.findSyllables(dirname, species, minlen, fs, f1, f2, denoise)
# dataset format: [[file1, seg1, syl1], [file1, seg1, syl2], [file1, seg2, syl1],..]
# Make syllables fixed-length (again to have same sized feature matrices) and generate features
lengths = []
for data in dataset:
lengths.append(data[2][1] - data[2][0])
duration = np.median(lengths)
print("- Setting duration to", duration)
        # duration is going to be the fixed length of a syllable; if a syllable is too long, clip it
for record in dataset:
if record[2][1] - record[2][0] > duration:
middle = (record[2][1] + record[2][0]) / 2
record[2][0] = middle - duration / 2
record[2][1] = middle + duration / 2
# 4. Read the syllables and generate features, also zero padding short syllables
features = []
for record in dataset:
audiodata = self.loadFile(filename=record[0], duration=record[2][1] - record[2][0], offset=record[2][0], fs=fs, denoise=denoise, f1=f1, f2=f2, silent=True)
audiodata = audiodata.tolist()
if record[2][1] - record[2][0] < duration:
# Zero padding both ends to have fixed duration
gap = int((duration * fs - len(audiodata)) // 2)
z = [0] * gap
audiodata.extend(z)
z.extend(audiodata)
audiodata = z
if feature == 'mfcc': # MFCC
mfcc = librosa.feature.mfcc(y=np.asarray(audiodata), sr=fs, n_mfcc=n_mels)
if f1 != 0 and f2 != 0:
mfcc = mfcc[ind_flow:ind_fhigh, :] # Limit the frequency to the fixed range [f1, f2]
mfcc_delta = librosa.feature.delta(mfcc, mode='nearest')
mfcc = np.concatenate((mfcc, mfcc_delta), axis=0)
mfcc = scale(mfcc, axis=1)
mfcc = [i for sublist in mfcc for i in sublist]
features.append(mfcc)
record.insert(3, mfcc)
elif feature == 'we': # Wavelet Energy
ws = WaveletSegment.WaveletSegment(spInfo={})
we = ws.computeWaveletEnergy(data=audiodata, sampleRate=fs, nlevels=nlevels, wpmode='new')
we = we.mean(axis=1)
if weInds:
we = we[weInds]
# if f1 != 0 and f2 != 0:
# we = we[ind_flow:ind_fhigh] # Limit the frequency to a fixed range f1, f2
features.append(we)
record.insert(3, we)
elif feature == 'chroma':
                chroma = librosa.feature.chroma_cqt(y=np.asarray(audiodata), sr=fs)
# chroma = librosa.feature.chroma_stft(y=data, sr=fs)
chroma = scale(chroma, axis=1)
features.append(chroma)
record.insert(3, chroma)
# 5. Actual clustering
# features = TSNE().fit_transform(features)
self.features = features
model = self.trainModel()
predicted_labels = model.labels_
print(predicted_labels)
# clusters = len(set(model.labels_))
# Attach the label to each syllable
for i in range(len(predicted_labels)):
dataset[i].insert(4, predicted_labels[i]) # dataset format [[file1, seg1, syl1, features, predict], ...]
clustered_dataset = []
for record in dataset:
if record[:2] not in clustered_dataset:
clustered_dataset.append(record[:2]) # clustered_dataset [[file1, seg1], ...]
labels = [[] for i in range(len(clustered_dataset))]
for i in range(len(predicted_labels)):
ind = clustered_dataset.index(dataset[i][:2])
labels[ind].append(predicted_labels[i])
# Majority voting when multiple syllables in a segment
for i in range(len(labels)):
try:
labels[i] = mode(labels[i])
except:
labels[i] = labels[i][0]
# Add the detected syllables
for record in clustered_dataset:
record.insert(2, [])
for rec in dataset:
if record[:2] == rec[:2]:
record[2].append(rec[2])
# Add the features
for record in clustered_dataset:
record.insert(3, [])
for rec in dataset:
if record[:2] == rec[:2]:
record[3].append(rec[3])
        # Make the labels continuous, e.g. agglomerative may have produced 0, 2, 3, ...
ulabels = list(set(labels))
nclasses = len(ulabels)
dic = []
for i in range(nclasses):
dic.append((ulabels[i], i))
dic = dict(dic)
# Update the labels
for i in range(len(clustered_dataset)):
clustered_dataset[i].insert(4, dic[labels[i]])
# clustered_dataset format: [[file1, seg1, [syl1, syl2], [features1, features2], predict], ...]
return clustered_dataset, nclasses, duration
def nodesInRange(self, nlevels, f1, f2, fs):
''' Return the indices (nodes) to keep
'''
allnodes = range(1, 2 ** (nlevels + 1) - 1)
inband = []
for i in allnodes:
flow, fhigh = WaveletFunctions.getWCFreq(i, fs)
if flow < f2 and fhigh > f1:
inband.append(i-1)
return inband
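    # Hedged worked example (added): for nlevels=6 the wavelet packet tree
    # has 2**(6+1) - 2 = 126 nodes, indexed 1..126. A node is kept when its
    # band [flow, fhigh] from WaveletFunctions.getWCFreq() overlaps the
    # target band, i.e. flow < f2 and fhigh > f1; indices are returned
    # 0-based (i-1) so they can be used directly on the energy array.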
def getFrqRange(self, dirname, species, fs):
''' Get the frequency band and sampling frequency from annotations
'''
lowlist = []
highlist = []
# Directory mode (from the training dialog)
if os.path.isdir(dirname):
for root, dirs, files in os.walk(str(dirname)):
for file in files:
if file.lower().endswith('.wav') and file + '.data' in files:
# wavrate = wavio.readFmt(os.path.join(root, file))[0]
# srlist.append(wavrate)
# Read the annotation
segments = Segment.SegmentList()
segments.parseJSON(os.path.join(root, file + '.data'))
# keep the right species
if species:
thisSpSegs = segments.getSpecies(species)
else:
thisSpSegs = np.arange(len(segments)).tolist()
for segix in thisSpSegs:
seg = segments[segix]
lowlist.append(seg[2])
highlist.append(seg[3])
# File mode (from the main interface)
elif os.path.isfile(dirname):
if dirname.lower().endswith('.wav') and os.path.exists(dirname + '.data'):
# wavrate = wavio.readFmt(dirname)[0]
# srlist.append(wavrate)
# Read the annotation
segments = Segment.SegmentList()
segments.parseJSON(dirname + '.data')
# keep the right species
if species:
thisSpSegs = segments.getSpecies(species)
else:
thisSpSegs = np.arange(len(segments)).tolist()
for segix in thisSpSegs:
seg = segments[segix]
lowlist.append(seg[2])
highlist.append(seg[3])
if len(thisSpSegs) < self.n_clusters:
self.n_clusters = len(thisSpSegs)//2
print('Setting number of clusters to ', self.n_clusters)
# Sampling rate is coming from the first page in the wavelet training wizard
# # Set sampling frequency based on segments and min samp. frq from the file list
# arr = [4000, 8000, 16000, 32000, 48000]
# pos = np.abs(arr - np.median(highlist) * 2).argmin()
# fs = arr[pos]
# if fs > np.min(srlist):
# fs = np.min(srlist)
# Find frequency limits
        # TODO: Kept fixed in order to have same-sized feature matrices; can we vary this to use segment frequency limits?
if len(lowlist) > 0:
f1 = np.min(lowlist)
f2 = np.median(highlist)
else:
f1 = 0
f2 = fs/2
if fs < f2 * 2 + 50:
f2 = fs // 2 - 50
if f2 < f1:
f2 = np.mean(highlist)
return f1, f2
def findSyllables(self, dirname, species, minlen, fs, f1, f2, denoise):
""" Find the syllables
:param dirname: directory with the sound and annotation files OR a single wave file (having its .data)
:param species: target species
:param minlen: minimum length of a segment
:param fs: sampling frequency
:param f1: lower frequency bound
:param f2: higher frequency bound
:param denoise: denoise or not binary
:return: a list of lists [[file1, seg1, syl1], [file1, seg1, syl2], [file1, seg2, syl1],..]
"""
dataset = []
if os.path.isdir(dirname):
for root, dirs, files in os.walk(str(dirname)):
for file in files:
if file.lower().endswith('.wav') and file + '.data' in files:
# Read the annotation
segments = Segment.SegmentList()
segments.parseJSON(os.path.join(root, file + '.data'))
if species:
thisSpSegs = segments.getSpecies(species)
else:
thisSpSegs = np.arange(len(segments)).tolist()
# Now find syllables within each segment, median clipping
for segix in thisSpSegs:
seg = segments[segix]
syls = self.findSyllablesSeg(os.path.join(root, file), seg, fs, denoise, minlen)
for syl in syls:
dataset.append([os.path.join(root, file), seg, syl])
elif os.path.isfile(dirname):
if dirname.lower().endswith('.wav') and os.path.exists(dirname + '.data'):
# Read the annotation
segments = Segment.SegmentList()
segments.parseJSON(dirname + '.data')
if species:
thisSpSegs = segments.getSpecies(species)
else:
thisSpSegs = np.arange(len(segments)).tolist()
# Now find syllables within each segment, median clipping
for segix in thisSpSegs:
seg = segments[segix]
syls = self.findSyllablesSeg(dirname, seg, fs, denoise, minlen)
for syl in syls:
dataset.append([dirname, seg, syl])
return dataset
def findSyllablesSeg(self, file, seg, fs, denoise, minlen):
""" Find syllables in the segment using median clipping - single segment
:return: syllables list
"""
# TODO: Use f1 and f2 to restrict spectrogram in median clipping to skip some of the noise
# audiodata = self.loadFile(filename=file, duration=seg[1] - seg[0], offset=seg[0], fs=fs, denoise=denoise, f1=f1, f2=f2)
audiodata = self.loadFile(filename=file, duration=seg[1] - seg[0], offset=seg[0], fs=fs, denoise=denoise)
start = seg[0]
sp = SignalProc.SignalProc()
sp.data = audiodata
sp.sampleRate = fs
_ = sp.spectrogram()
        # Pass only the segment frequencies to the median clipping and avoid overlapping noise - better than filtering when loading audiodata (which could cause aliasing)
linear = np.linspace(0, fs / 2, int(sp.window_width/2))
# check segment type to determine if upper freq bound is OK
if seg[3]==0:
print("Warning: auto-detecting freq bound for full-height segments")
fhigh = fs//2
else:
fhigh = seg[3]
ind_flow = (np.abs(linear - seg[2])).argmin()
ind_fhigh = (np.abs(linear - fhigh)).argmin()
sp.sg = sp.sg[:, ind_flow:ind_fhigh]
segment = Segment.Segmenter(sp, fs)
syls = segment.medianClip(thr=3, medfiltersize=5, minaxislength=9, minSegment=50)
if len(syls) == 0: # Sanity check
# Try again with lower threshold
segment = Segment.Segmenter(sp, fs)
syls = segment.medianClip(thr=2, medfiltersize=5, minaxislength=9, minSegment=50)
# Merge overlapped segments
syls = segment.checkSegmentOverlap(syls)
syls = segment.deleteShort(syls, minlen)
syls = [[s[0] + start, s[1] + start] for s in syls]
# Sanity check, e.g. when user annotates syllables tight, median clipping may not detect it
if len(syls) == 0:
syls = [[start, seg[1]]]
if len(syls) == 1 and syls[0][1] - syls[0][0] < minlen: # Sanity check
syls = [[start, seg[1]]]
return syls
def trainModel(self):
""" Clustering model"""
if self.alg == 'DBSCAN':
print('\nDBSCAN--------------------------------------')
model = self.DBscan(eps=0.3, min_samples=3)
        elif self.alg == 'Birch':
            print('\nBirch----------------------------------------')
            if self.n_clusters:
                model = self.birch(threshold=0.5, n_clusters=self.n_clusters)
            else:
                model = self.birch(threshold=0.88, n_clusters=None)
        elif self.alg == 'agglomerative':
print('\nAgglomerative Clustering----------------------')
# Either set n_clusters=None and compute_full_tree=T or distance_threshold=None
if not self.n_clusters:
model = self.agglomerativeClustering(n_clusters=None, distance_threshold=0.5, linkage='average', affinity='precomputed')
else:
model = self.agglomerativeClustering(n_clusters=self.n_clusters, distance_threshold=None, linkage='average', affinity='precomputed')
# # Either set n_clusters=None and compute_full_tree=T or distance_threshold=None
# if not self.n_clusters:
# model = self.agglomerativeClustering(n_clusters=None, compute_full_tree=True, distance_threshold=0.5,
# linkage='complete')
# else:
# model = self.agglomerativeClustering(n_clusters=self.n_clusters, compute_full_tree=False,
# distance_threshold=None, linkage='complete')
# # model.fit_predict(self.features)
return model
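    # Hedged note (added): with affinity='precomputed', scikit-learn's
    # agglomerative clustering only accepts linkage='average' or 'complete'
    # ('ward' requires raw Euclidean features), which is why the DTW distance
    # matrix is built via custom_dist() before fitting. A runnable toy call
    # is sketched in _demo_train_model() at the end of this file.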
def getClusterCenter(self, cluster, fs, f1, f2, feature, duration, n_mels=24, denoise=False):
"""
Compute cluster centre of a cluster
:param cluster: segments of a cluster - a list of lists, each sublist represents a segment
[parent_audio_file, [segment], [syllables], [features], class_label]
:param feature: 'we' or 'mfcc' or 'chroma'
:param duration: the fixed duration of a syllable
:return: cluster centre, an array
"""
# Re-compute features to match with frquency range [f1, f2]
# Find the lower and upper bounds (relevant to the frq range), when the range is given
if feature == 'mfcc' and f1 != 0 and f2 != 0:
mels = librosa.core.mel_frequencies(n_mels=n_mels, fmin=0.0, fmax=fs / 2, htk=False)
ind_flow = (np.abs(mels - f1)).argmin()
ind_fhigh = (np.abs(mels - f2)).argmin()
elif feature == 'we' and f1 != 0 and f2 != 0:
linear = np.linspace(0, fs / 2, 62)
ind_flow = (np.abs(linear - f1)).argmin()
ind_fhigh = (np.abs(linear - f2)).argmin()
fc = []
for record in cluster:
# Compute the features of each syllable in this segment
for syl in record[2]:
audiodata = self.loadFile(filename=record[0], duration=syl[1] - syl[0], offset=syl[0], fs=fs, denoise=denoise, f1=f1, f2=f2, silent=True)
audiodata = audiodata.tolist()
if syl[1] - syl[0] < duration:
# Zero padding both ends to have fixed duration
gap = int((duration * fs - len(audiodata)) // 2)
z = [0] * gap
audiodata.extend(z)
z.extend(audiodata)
audiodata = z
if feature == 'mfcc': # MFCC
mfcc = librosa.feature.mfcc(y=np.asarray(audiodata), sr=fs, n_mfcc=n_mels)
if f1 != 0 and f2 != 0:
mfcc = mfcc[ind_flow:ind_fhigh, :] # Limit the frequency to the fixed range [f1, f2]
mfcc_delta = librosa.feature.delta(mfcc, mode='nearest')
mfcc = np.concatenate((mfcc, mfcc_delta), axis=0)
mfcc = scale(mfcc, axis=1)
mfcc = [i for sublist in mfcc for i in sublist]
fc.append(mfcc)
elif feature == 'we': # Wavelet Energy
ws = WaveletSegment.WaveletSegment(spInfo={})
we = ws.computeWaveletEnergy(data=audiodata, sampleRate=fs, nlevels=5, wpmode='new')
we = we.mean(axis=1)
if f1 != 0 and f2 != 0:
we = we[ind_flow:ind_fhigh] # Limit the frequency to a fixed range f1, f2
fc.append(we)
elif feature == 'chroma':
                    chroma = librosa.feature.chroma_cqt(y=np.asarray(audiodata), sr=fs)
# chroma = librosa.feature.chroma_stft(y=data, sr=fs)
chroma = scale(chroma, axis=1)
fc.append(chroma)
return np.mean(fc, axis=0)
def loadFile(self, filename, duration=0, offset=0, fs=0, denoise=False, f1=0, f2=0, silent=False):
"""
Read audio file and preprocess as required.
"""
if duration == 0:
duration = None
sp = SignalProc.SignalProc(256, 128)
sp.readWav(filename, duration, offset, silent=silent)
sp.resample(fs)
sampleRate = sp.sampleRate
audiodata = sp.data
# # pre-process
if denoise:
WF = WaveletFunctions.WaveletFunctions(data=audiodata, wavelet='dmey2', maxLevel=10, samplerate=fs)
audiodata = WF.waveletDenoise(thresholdType='soft', maxLevel=10)
if f1 != 0 and f2 != 0:
# audiodata = sp.ButterworthBandpass(audiodata, sampleRate, f1, f2)
audiodata = sp.bandpassFilter(audiodata, sampleRate, f1, f2)
return audiodata
def cluster_by_dist(self, dir, species, feature='we', n_mels=24, fs=0, minlen=0.2, f_1=0, f_2=0, denoise=False, single=False,
distance='dtw', max_clusters=10):
"""
Given wav + annotation files,
1) identify syllables using median clipping/ FIR
2) generate features WE/MFCC/chroma
3) calculate DTW distances and decide class/ generate new class
:param dir: directory of audio and annotations
:param feature: 'WE' or 'MFCC' or 'chroma'
:param n_mels: number of mel coefs for MFCC
        :param fs: preferred sampling frequency; 0 means calculate it from the annotations
        :param minlen: min syllable length in secs
        :param f_1: lower frequency bound; 0 means calculate it from the annotations
        :param f_2: upper frequency bound; 0 means calculate it from the annotations
:param denoise: wavelet denoise
:param single: True means when there are multiple syllables in a segment, add only one syllable to the cluster info
:param distance: 'dtw' or 'xcor'
:return: possible clusters
"""
import Segment
import SignalProc
from scipy import signal
# Get flow and fhigh for bandpass from annotations
lowlist = []
highlist = []
srlist = []
for root, dirs, files in os.walk(str(dir)):
for file in files:
if file.lower().endswith('.wav') and file + '.data' in files:
wavrate = wavio.readFmt(os.path.join(root, file))[0]
srlist.append(wavrate)
# Read the annotation
segments = Segment.SegmentList()
segments.parseJSON(os.path.join(root, file + '.data'))
# keep the right species
if species:
thisSpSegs = segments.getSpecies(species)
else:
thisSpSegs = np.arange(len(segments)).tolist()
for segix in thisSpSegs:
seg = segments[segix]
lowlist.append(seg[2])
highlist.append(seg[3])
print(lowlist)
print(highlist)
print(srlist)
if f_1 == 0:
f_1 = np.min(lowlist)
if f_2 == 0:
f_2 = np.median(highlist)
if fs == 0:
arr = [4000, 8000, 16000]
pos = np.abs(arr - np.median(highlist) * 2).argmin()
fs = arr[pos]
print('fs: ', fs)
if fs > np.min(srlist):
print(fs)
fs = np.min(srlist)
if fs < f_2 * 2 + 50:
f_2 = fs // 2 - 50
minlen_samples = minlen * fs
print('Frequency band:', f_1, '-', f_2)
print('fs: ', fs)
# Find the lower and upper bounds (relevant to the frq range), when the range is given
if feature == 'mfcc' and f_1 != 0 and f_2 != 0:
mels = librosa.core.mel_frequencies(n_mels=n_mels, fmin=0.0, fmax=fs / 2, htk=False)
ind_flow = (np.abs(mels - f_1)).argmin()
ind_fhigh = (np.abs(mels - f_2)).argmin()
elif feature == 'we' and f_1 != 0 and f_2 != 0:
linear = np.linspace(0, fs / 2, 62)
ind_flow = (np.abs(linear - f_1)).argmin()
ind_fhigh = (np.abs(linear - f_2)).argmin()
# Ready for clustering
max_clusters = max_clusters
n_clusters = 0
clusters = []
for root, dirs, files in os.walk(str(dir)):
for file in files:
if file.lower().endswith('.wav') and file + '.data' in files:
# Read the annotation
segments = Segment.SegmentList()
segments.parseJSON(os.path.join(root, file + '.data'))
# keep the right species
if species:
thisSpSegs = segments.getSpecies(species)
else:
thisSpSegs = np.arange(len(segments)).tolist()
# Sort the segments longest to shortest, would be a good idea to avoid making first class with only
# one member :)
segments_len = [segments[segix][1] - segments[segix][0] for segix in thisSpSegs]
inds = np.argsort(segments_len)[::-1]
sortedsegments = [segments[i] for i in inds]
# Now find syllables within each segment, median clipping
for seg in sortedsegments:
if seg[0] == -1:
continue
audiodata = self.loadFile(filename=os.path.join(root, file), duration=seg[1] - seg[0],
offset=seg[0], fs=fs, denoise=denoise, f1=f_1, f2=f_2)
start = int(seg[0] * fs)
sp = SignalProc.SignalProc(256, 128)
sp.data = audiodata
sp.sampleRate = fs
sgRaw = sp.spectrogram(256, 128)
segment = Segment.Segmenter(sp=sp, fs=fs)
syls = segment.medianClip(thr=3, medfiltersize=5, minaxislength=9, minSegment=50)
if len(syls) == 0: # Try again with FIR
syls = segment.segmentByFIR(threshold=0.05)
syls = segment.checkSegmentOverlap(syls) # merge overlapped segments
syls = [[int(s[0] * fs), int(s[1] * fs)] for s in syls]
if len(syls) == 0: # Sanity check, when annotating syllables tight,
syls = [[0, int((seg[1] - seg[0]) * fs)]] # median clipping doesn't detect it.
if len(syls) > 1:
# TODO: samples to seconds
syls = segment.joinGaps(syls, minlen_samples) # Merge short segments
if len(syls) == 1 and syls[0][1] - syls[0][0] < minlen_samples: # Sanity check
syls = [[0, int((seg[1] - seg[0]) * fs)]]
temp = [[np.round((x[0] + start) / fs, 2), np.round((x[1] + start) / fs, 2)] for x in syls]
print('\nCurrent:', seg, '--> syllables >', minlen, 'secs ', temp)
# Calculate features of the syllables in the current segment.
f = []
for s in syls:
data = audiodata[s[0]:s[1]]
if feature == 'mfcc': # MFCC
mfcc = librosa.feature.mfcc(y=data, sr=fs, n_mfcc=n_mels)
if f_1 != 0 and f_2 != 0:
mfcc = mfcc[ind_flow:ind_fhigh, :] # Limit the frequency to the fixed range [f_1, f_2]
mfcc_delta = librosa.feature.delta(mfcc, mode='nearest')
mfcc = np.concatenate((mfcc, mfcc_delta), axis=0)
mfcc = scale(mfcc, axis=1)
# librosa.display.specshow(mfcc, sr=fs, x_axis='time')
# m = [i for sublist in mfcc for i in sublist]
f.append(mfcc)
elif feature == 'we': # Wavelet Energy
ws = WaveletSegment.WaveletSegment(spInfo={})
we = ws.computeWaveletEnergy(data=data, sampleRate=fs, nlevels=5, wpmode='new')
we = we.mean(axis=1)
if f_1 != 0 and f_2 != 0:
we = we[ind_flow:ind_fhigh] # Limit the frequency to a fixed range f_1, f_2
f.append(we)
elif feature == 'chroma':
chroma = librosa.feature.chroma_cqt(y=data, sr=fs)
# chroma = librosa.feature.chroma_stft(y=data, sr=fs)
chroma = scale(chroma, axis=1)
f.append(chroma)
matched = False
if n_clusters == 0:
print('**Case 1: First class')
newclass = self.class_create(label=n_clusters, syl=syls, features=f, f_low=seg[2],
f_high=seg[3], segs=[(os.path.join(root, file), seg)],
single=single, dist_method=distance)
clusters.append(newclass)
n_clusters += 1
print('Created new class: Class ', "'", newclass["label"], "'", ',\tIn-class_d: ',
newclass["d"], '\tf_low: ', newclass["f_low"], '\tf_high: ', newclass["f_high"])
matched = True
if not matched:
# See if the syllables in the current seg match with any existing class
min_ds = [] # Keep track of the minimum distances to each class
clusters = random.sample(clusters, len(clusters)) # Shuffle the clusters to avoid bias
for c in range(len(clusters)):
f_c = clusters[c]["features"] # features of the current class c
dist_c = np.zeros((len(f_c), len(f))) # distances to the current class c
for i in range(len(f_c)):
for j in range(len(f)):
if distance == 'dtw':
d, _ = librosa.sequence.dtw(f_c[i], f[j], metric='euclidean')
dist_c[i, j] = d[d.shape[0] - 1][d.shape[1] - 1]
elif distance == 'xcor':
corr = signal.correlate(f_c[i], f[j], mode='full')
dist_c[i, j] = np.sum(corr) / max(len(f_c[i]), len(f[j]))
# Min distance to the current class
print('Distance to Class ', clusters[c]["label"], ': ', np.amin(dist_c[dist_c != 0]),
'( In-class distance: ', clusters[c]["d"], ')')
min_ds.append(np.amin(dist_c[dist_c != 0]))
# Now get the clusters sorted according to the min dist
ind = np.argsort(min_ds)
min_ds = np.sort(min_ds)
# make the cluster order
clusters = [clusters[i] for i in ind]
for c in range(len(clusters)):
if (clusters[c]["d"] != 0) and min_ds[c] < (clusters[c]["d"] + clusters[c]["d"] * 0.1):
print('**Case 2: Found a match with a class > one syllable')
print('Class ', clusters[c]["label"], ', dist ', min_ds[c])
# Update this class
clusters[c] = self.class_update(cluster=clusters[c], newfeatures=f, newf_low=seg[2],
newf_high=seg[3], newsyl=syls,
newseg=(os.path.join(root, file), seg), single=single,
dist_method=distance)
matched = True
break # found a match, exit from the for loop, go to the next segment
elif c < len(clusters) - 1:
continue # continue to the next class
# Checked most of the classes by now, if still no match found, check the classes with only one
# data point (clusters[c]["d"] == 0).
# Note the arbitrary thr.
if not matched:
if distance == 'dtw':
thr = 25
elif distance == 'xcor':
thr = 1000
for c in range(len(clusters)):
if clusters[c]["d"] == 0 and min_ds[c] < thr:
print('**Case 3: In-class dist of ', clusters[c]["label"], '=', clusters[c]["d"],
'and this example < ', thr, ' dist')
print('Class ', clusters[c]["label"], ', dist ', min_ds[c])
# Update this class
clusters[c] = self.class_update(cluster=clusters[c], newfeatures=f, newf_low=seg[2],
newf_high=seg[3], newsyl=syls,
newseg=(os.path.join(root, file), seg), single=single,
dist_method=distance)
matched = True
break # Break the search and go to the next segment
# If no match found yet, check the max clusters
if not matched:
if n_clusters == max_clusters:
print('**Case 4: Reached max classes, therefore adding current seg to the closest '
'class... ')
# min_ind = np.argmin(min_ds)
# classes are sorted in ascending order of distance already
for c in range(len(clusters)):
if min_ds[c] <= 4 * clusters[c]["d"] or clusters[c]["d"] == 0:
print('Class ', clusters[c]["label"], ', dist ', min_ds[c],
'(in-class distance:', clusters[c]["d"], ')')
# Update this class
clusters[c] = self.class_update(cluster=clusters[c], newfeatures=f, newf_low=seg[2],
newf_high=seg[3], newsyl=syls,
newseg=(os.path.join(root, file), seg),
single=single,
dist_method=distance)
matched = True
break
if not matched:
print('Class ', clusters[0]["label"], ', dist ', min_ds[0],
'(in-class distance:', clusters[0]["d"], ')')
# Update this class
# TODO: don't update the class as it is an outlier?
clusters[0] = self.class_update(cluster=clusters[0], newfeatures=f, newf_low=seg[2],
newf_high=seg[3], newsyl=syls,
newseg=(os.path.join(root, file), seg), single=single,
dist_method=distance)
matched = True
continue # Continue to next segment
# If still no luck, create a new class
if not matched:
print('**Case 5: None of Case 1-4')
newclass = self.class_create(label=n_clusters, syl=syls, features=f, f_low=seg[2], f_high=seg[3],
segs=[(os.path.join(root, file), seg)], single=single,
dist_method=distance)
print('Created a new class: Class ', n_clusters + 1)
clusters.append(newclass)
n_clusters += 1
print('Created new class: Class ', "'", newclass["label"], "'", ',\tin-class_d: ',
newclass["d"], '\tf_low: ', newclass["f_low"], '\tf_high: ', newclass["f_high"])
print('\n\n--------------Clusters created-------------------')
clustered_segs = []
for c in range(len(clusters)):
print('Class ', clusters[c]['label'], ': ', len(clusters[c]['segs']))
for s in range(len(clusters[c]['segs'])):
print('\t', clusters[c]['segs'][s])
if single:
clustered_segs.append([clusters[c]['segs'][s][0], clusters[c]['segs'][s][1],
[clusters[c]['features'][s]], clusters[c]['label']])
else:
clustered_segs.append([clusters[c]['segs'][s][0], clusters[c]['segs'][s][1], clusters[c]['label']])
# Clustered segments
print('\n\n################### Clustered segments ############################')
for s in clustered_segs:
print(s)
return clustered_segs, fs, n_clusters, 1
# return clustered_dataset, fs, nclasses, duration
def class_create(self, label, syl, features, f_low, f_high, segs, single=False, dist_method='dtw'):
""" Create a new class
:param label: label of the new class
:param syl: syllables
:param features:
:param f_low:
:param f_high:
:param segs:
:param single: True if only one syllable from the segment goes to the class templates
:return:
"""
from scipy import signal
dist = np.zeros((len(features), len(features)))
shift = 0
for i in range(len(features)):
shift += 1
for j in range(shift, len(features)):
if dist_method == 'dtw':
d, _ = librosa.sequence.dtw(features[i], features[j], metric='euclidean')
dist[i, j] = d[d.shape[0] - 1][d.shape[1] - 1]
elif dist_method == 'xcor':
corr = signal.correlate(features[i], features[j], mode='full')
dist[i, j] = np.sum(corr) / max(len(features[i]), len(features[j]))
if np.count_nonzero(dist) > 0:
nonzero = dist > 0
inclass_d = np.percentile(dist[nonzero], 10) # TODO: max? mean? a percentile?
else:
inclass_d = 0
if single:
features = [features[len(features) // 2]] # get the features of the middle syllable
newclass = {
"label": label,
"d": inclass_d,
"syl": syl,
"features": features,
"f_low": f_low,
"f_high": f_high,
"segs": segs
}
return newclass
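    # Hedged note (added): the shifted double loop above fills only the
    # strict upper triangle of `dist`, so each unordered pair of templates is
    # scored once. For three templates the nonzero entries are dist[0, 1],
    # dist[0, 2] and dist[1, 2], and the in-class distance is the 10th
    # percentile of those values.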
def class_update(self, cluster, newfeatures, newf_low, newf_high, newsyl, newseg, single, dist_method='dtw'):
""" Update an existing class
:param cluster: the class to update
:param newfeatures:
:param newf_low:
:param newf_high:
:param newsyl:
:param newsegs:
:return: the updated cluster
"""
from scipy import signal
# Get in-class distance
f_c = cluster["features"] # features of the current class c
if single:
newfeatures = [newfeatures[len(newfeatures) // 2]]
newsyl = [newsyl[len(newsyl) // 2]]
for i in range(len(newfeatures)):
f_c.append(newfeatures[i])
dist_c = np.zeros((len(f_c), len(f_c))) # distances to the current class c
shift = 0
for i in range(len(f_c)):
shift += 1
for j in range(shift, len(f_c)):
if dist_method == 'dtw':
d, _ = librosa.sequence.dtw(f_c[i], f_c[j], metric='euclidean')
dist_c[i, j] = d[d.shape[0] - 1][d.shape[1] - 1]
elif dist_method == 'xcor':
corr = signal.correlate(f_c[i], f_c[j], mode='full')
dist_c[i, j] = np.sum(corr) / max(len(f_c[i]), len(f_c[j]))
if np.count_nonzero(dist_c) > 0:
nonzero = dist_c > 0
inclass_d = np.percentile(dist_c[nonzero], 10) # TODO: max? mean? a percentile?
else:
inclass_d = 0
for s in newsyl:
cluster["syl"].append(s)
for fe in newfeatures:
cluster["features"].append(fe)
cluster["d"] = inclass_d
cluster["f_low"] = (newf_low + cluster["f_low"]) / 2 # not sure if this is correct
cluster["f_high"] = (newf_high + cluster["f_high"]) / 2
cluster["segs"].append(newseg)
print('Updated Class ', "'", cluster["label"], "'" '\tin-class_d: ',
cluster["d"], '\tf_low: ', cluster["f_low"], '\tf_high: ',
cluster["f_high"])
return cluster
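# ---------------------------------------------------------------------------
# Hedged demo (added for illustration; not part of the original AviaNZ file).
# It exercises the agglomerative path of trainModel() on a toy feature
# matrix. Assumptions: numpy, scikit-learn and librosa are importable, and
# the installed scikit-learn still accepts the affinity= keyword used above.
# The helper name `_demo_train_model` is illustrative.
def _demo_train_model():
    feats = [[0.0, 0.0, 0.0], [0.1, 0.0, 0.1],
             [5.0, 5.0, 5.0], [5.1, 5.0, 4.9]]
    c = Clustering(feats, labels=[0, 0, 1, 1], nclusters=2)
    c.alg = 'agglomerative'   # trainModel() reads this attribute
    model = c.trainModel()    # DTW distances + average linkage
    print(model.labels_)      # two well-separated toy clusters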
| smarsland/AviaNZ | Clustering.py | Python | gpl-3.0 | 53,528 | ["Gaussian"] | 265ce6968f65710170c4a70c005a76c37ab5142fd95f5396632f75fd2c445143 |
# Copyright 2018 David Huberdeau & Peter Kok
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Inverted Encoding Model (IEM)
Method to decode and reconstruct features from data.
The implementation is roughly based on the following publications:
[Kok2013] "1.Kok, P., Brouwer, G. J., Gerven, M. A. J. van &
Lange, F. P. de. Prior Expectations Bias Sensory Representations
in Visual Cortex. J. Neurosci. 33, 16275–16284 (2013).
[Brouwer2011] "2.Brouwer, G. J. & Heeger, D. J. Cross-orientation
suppression in human visual cortex. J. Neurophysiol. 106(5):
2108-2119 (2011).
[Brouwer2009] "3.Brouwer, G. J. & Heeger, D. J.
Decoding and Reconstructing Color from Responses in Human Visual
Cortex. J. Neurosci. 29, 13992–14003 (2009).
This implementation uses a set of sinusoidal
basis functions to represent the set of possible feature values.
A feature value is some characteristic of a stimulus, e.g. the
angular location of a target along a horizontal line. This code was
written to give some flexibility compared to the specific instances
in Kok, 2013 & in Brouwer, 2009. Users can set the number of basis
functions, or channels, and the range of possible feature values.
There are separate classes for reconstructing feature values in a
1-dimensional (1D) space or in a 2-dimensional (2D) space.
"""
# Authors: David Huberdeau (Yale University) &
# Peter Kok (Yale University), 2018 &
# Vy Vo (Intel Corp., UCSD), 2019
import logging
import warnings
import numpy as np
import scipy.stats
from sklearn.base import BaseEstimator
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
from ..utils.utils import circ_dist
__all__ = ["InvertedEncoding1D",
"InvertedEncoding2D"]
logger = logging.getLogger(__name__)
MAX_CONDITION_CHECK = 9000
class InvertedEncoding1D(BaseEstimator):
"""Basis function-based reconstruction method
Inverted encoding models (alternatively known as forward models) are used
to reconstruct a feature represented in some N-dimensional space, here 1D,
(e.g. color of a stimulus) from patterns across voxels in functional data.
The model uses n_channels number of idealized basis functions and assumes
that the transformation from stimulus feature (e.g. color) to basis
function is one- to-one and invertible. The response of a voxel is
expressed as the weighted sum of basis functions. In this implementation,
basis functions were half-wave rectified sinusoid functions raised to a
power set by the user (e.g. 6).
The model:
Inverted encoding models reconstruct a stimulus feature from
patterns of BOLD activity by relating the activity in each
voxel, B, to the values of hypothetical channels (or basis
functions), C, according to Equation 1 below.
(1) B = W*C
where W is a weight matrix that represents the relationship
between BOLD activity and Channels. W must be estimated from
training data; this implementation (and most described in the
literature) uses linear regression to estimate W as in Equation
2 below [note: inv() represents matrix inverse or
pseudo-inverse].
(2) W_est = B_train*inv(C_train)
The weights in W_est (short for "estimated") represent the
contributions of each channel to the response of each voxel.
Estimated channel responses can be computed given W_est and
new voxel activity represented in matrix B_exp (short for
"experiment") through inversion of Equation 1:
(3) C_est = inv(W_est)*B_exp
Given estimated channel responses, C_est, it is straightforward
to obtain the reconstructed feature value by summing over
channels multiplied by their channel responses and taking the
argmax (i.e. the feature associated with the maximum value).
Using this model:
Use fit() to estimate the weights of the basis functions given
input data (e.g. beta values from fMRI data). This function
will execute equation 2 above.
Use predict() to compute predicted stimulus values
from new functional data. This function computes estimated
channel responses, as in equation 3, then computes summed
channel output and finds the argmax (within the stimulus
feature space) associated with those responses.
Use score() to compute a measure of the error of the prediction
based on known stimuli.
This implementation assumes a circular (or half-
circular) feature domain. Future implementations might
generalize the feature input space, and increase the
possible dimensionality.
Parameters
----------
    n_channels: int, default 6. Number of channels
        The number of channels, or basis functions, to be used in
        the inverted encoding model.
    channel_exp: int, default 5. Basis function exponent.
        The exponent of the sinusoidal basis functions, which
        establishes the width of the functions.
stimulus_mode: str, default 'halfcircular' (other option is
'circular'). Describes the feature domain.
range_start: double, default 0. Lowest value of domain.
Beginning value of range of independent variable
(usually degrees).
range_stop: double, default 180. Highest value of domain.
Ending value of range of independent variable
(usually degrees).
channel_density: int, default 180. Number of points in the
feature domain.
stimulus_resolution: double, default None will set the stimulus
resolution to be identical to the channel density. This sets
the resolution at which the stimuli were presented (e.g. a
spatial position with some width has a lower stimulus
resolution).
Attributes
----------
channels_: [n_channels, channel density] NumPy 2D array
matrix defining channel values
W_: sklearn.linear_model model containing weight matrix that
relates estimated channel responses to response amplitude
data
See get_params() for the rest of the attributes.
"""
def __init__(self, n_channels=6, channel_exp=5,
stimulus_mode='halfcircular', range_start=0., range_stop=180.,
channel_density=180, stimulus_resolution=None):
self.n_channels = n_channels
self.channel_exp = channel_exp
self.stimulus_mode = stimulus_mode
self.range_start = range_start
self.range_stop = range_stop
self.channel_density = channel_density
self.channel_domain = np.linspace(range_start, range_stop - 1,
channel_density)
if stimulus_resolution is None:
self.stim_res = channel_density
else:
self.stim_res = stimulus_resolution
self._check_params()
def _check_params(self):
if self.range_start >= self.range_stop:
raise ValueError("range_start {} must be less than "
"{} range_stop.".format(self.range_start,
self.range_stop))
if self.stimulus_mode == 'halfcircular':
if (self.range_stop - self.range_start) != 180.:
raise ValueError("For half-circular feature spaces,"
"the range must be 180 degrees, "
"not {}".
format(self.range_stop - self.range_start))
elif self.stimulus_mode == 'circular':
if (self.range_stop - self.range_start) != 360.:
raise ValueError("For circular feature spaces, the"
" range must be 360 degrees"
"not {}".
format(self.range_stop - self.range_start))
if self.n_channels < 2:
raise ValueError("Insufficient number of channels.")
if not np.isin(self.stimulus_mode, ['circular', 'halfcircular']):
raise ValueError("Stimulus mode must be one of these: "
"'circular', 'halfcircular'")
def fit(self, X, y):
"""Use data and feature variable labels to fit an IEM
Parameters
----------
X: numpy matrix of voxel activation data. [observations, voxels]
Should contain the beta values for each observation or
trial and each voxel of training data.
y: numpy array of response variable. [observations]
Should contain the feature for each observation in X.
"""
# Check that data matrix is well conditioned:
if np.linalg.cond(X) > MAX_CONDITION_CHECK:
logger.error("Data is singular.")
raise ValueError("Data matrix is nearly singular.")
if X.shape[0] < self.n_channels:
logger.error("Not enough observations. Cannot calculate "
"pseudoinverse.")
raise ValueError("Fewer observations (trials) than "
"channels. Cannot compute pseudoinverse.")
# Check that the data matrix is the right size
shape_data = np.shape(X)
shape_labels = np.shape(y)
if len(shape_data) != 2:
raise ValueError("Data matrix has too many or too few "
"dimensions.")
else:
if shape_data[0] != shape_labels[0]:
raise ValueError("Mismatched data samples and label samples")
# Define the channels (or basis set)
self.channels_, channel_centers = self._define_channels()
logger.info("Defined channels centered at {} degrees.".format(
np.rad2deg(channel_centers)))
# Create a matrix of channel activations for every observation.
# (i.e., C1 in Brouwer & Heeger 2009.)
C = self._define_trial_activations(y)
# Solve for W in B = WC
self.W_ = X.transpose() @ np.linalg.pinv(C.transpose())
if np.linalg.cond(self.W_) > MAX_CONDITION_CHECK:
logger.error("Weight matrix is nearly singular.")
raise ValueError("Weight matrix is nearly singular.")
return self
def predict(self, X):
"""Use test data to predict the feature
Parameters
----------
X: numpy matrix of voxel activation from test trials
[observations, voxels]. Used to predict feature
associated with the given observation.
Returns
-------
model_prediction: numpy array of estimated feature values.
"""
# Check that the data matrix is the right size
shape_data = np.shape(X)
if len(shape_data) != 2:
raise ValueError("Data matrix has too many or too few "
"dimensions.")
model_prediction = self._predict_features(X)
return model_prediction
def score(self, X, y):
"""Calculate error measure of prediction. Default measurement
is R^2, the coefficient of determination.
Parameters
----------
X: numpy matrix of voxel activation from new data
[observations,voxels]
y: numpy array of responses. [observations]
Returns
-------
score_value: the error measurement between the actual
feature and predicted features.
"""
pred_features = self.predict(X)
if self.stimulus_mode == 'halfcircular':
            # multiply features by 2, otherwise values don't wrap properly over the half-circular domain
pred_features = pred_features * 2
y = y * 2
ssres = (circ_dist(np.deg2rad(y),
np.deg2rad(pred_features)) ** 2).sum()
sstot = (circ_dist(np.deg2rad(y),
np.ones(y.size) * scipy.stats.circmean(
np.deg2rad(y))) ** 2).sum()
score_value = (1 - ssres / sstot)
return score_value
def get_params(self, deep: bool = True):
"""Returns model parameters.
Returns
-------
params: parameter of this object
"""
return {"n_channels": self.n_channels, "channel_exp": self.channel_exp,
"stimulus_mode": self.stimulus_mode,
"range_start": self.range_start, "range_stop": self.range_stop,
"channel_domain": self.channel_domain,
"stim_res": self.stim_res}
def set_params(self, **parameters):
"""Sets model parameters after initialization.
Parameters
----------
parameters: structure with parameters and change values
"""
for parameter, value in parameters.items():
setattr(self, parameter, value)
setattr(self, "channel_domain",
np.linspace(self.range_start, self.range_stop - 1,
self.channel_density))
self._check_params()
return self
def _define_channels(self):
"""Define basis functions (aka channels).
Returns
-------
channels: numpy matrix of basis functions. dimensions are
[n_channels, function resolution].
channel_centers: numpy array of the centers of each channel
"""
channel_centers = np.linspace(np.deg2rad(self.range_start),
np.deg2rad(self.range_stop),
self.n_channels + 1)
channel_centers = channel_centers[0:-1]
# make sure channels are not bimodal if using 360 deg space
if self.stimulus_mode == 'circular':
domain = self.channel_domain * 0.5
centers = channel_centers * 0.5
elif self.stimulus_mode == 'halfcircular':
domain = self.channel_domain
centers = channel_centers
# define exponentiated function
channels = np.asarray(
[np.cos(np.deg2rad(domain) - cx) ** self.channel_exp for cx in
centers])
# half-wave rectification preserving circularity
channels = abs(channels)
return channels, channel_centers
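    # Hedged worked example (added): each channel is |cos(theta - c_k)|**p
    # sampled over the feature domain, so `channels` has shape
    # [n_channels, channel_density]. With the defaults (6 channels over
    # 0-180 deg):
    #   iem = InvertedEncoding1D()
    #   ch, centers = iem._define_channels()
    #   ch.shape             # (6, 180)
    #   np.rad2deg(centers)  # [0., 30., 60., 90., 120., 150.]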
def _define_trial_activations(self, stimuli):
"""Defines a numpy matrix of predicted channel responses for
each trial/observation.
Parameters
stimuli: numpy array of the feature values for each
observation
Returns
-------
C: matrix of predicted channel responses. dimensions are
number of observations by stimulus resolution
"""
stim_axis = np.linspace(self.range_start, self.range_stop - 1,
self.stim_res)
if self.range_start > 0:
stimuli = stimuli + self.range_start
elif self.range_start < 0:
stimuli = stimuli - self.range_start
one_hot = np.eye(self.stim_res)
indices = [np.argmin(abs(stim_axis - x)) for x in stimuli]
stimulus_mask = one_hot[indices, :]
if self.channel_density != self.stim_res:
if self.channel_density % self.stim_res == 0:
stimulus_mask = np.repeat(stimulus_mask,
self.channel_density / self.stim_res)
else:
raise NotImplementedError("This code doesn't currently support"
" stimuli which are not square "
"functions in the feature domain, or"
" stimulus widths that are not even"
"divisors of the number of points in"
" the feature domain.")
C = stimulus_mask @ self.channels_.transpose()
# Check that C is full rank
if np.linalg.matrix_rank(C) < self.n_channels:
warnings.warn("Stimulus matrix is {}, not full rank. May cause "
"issues with stimulus prediction/reconstruction.".
format(np.linalg.matrix_rank(C)), RuntimeWarning)
return C
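    # Hedged note (added): the one-hot mask turns each stimulus label into a
    # delta function on the stimulus axis, so row t of C is just every
    # channel's value at stimulus t's location, giving C the shape
    # [observations, n_channels] - the C1 design matrix of Brouwer & Heeger
    # (2009).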
def _predict_channel_responses(self, X):
"""Computes predicted channel responses from data
(e.g. C2 in Brouwer & Heeger 2009)
Parameters
----------
X: numpy data matrix. [observations, voxels]
Returns
-------
channel_response: numpy matrix of channel responses
"""
channel_response = np.matmul(np.linalg.pinv(self.W_), X.transpose())
return channel_response
def _predict_feature_responses(self, X):
"""Takes channel weights and transforms them into continuous
functions defined in the feature domain.
Parameters
---------
X: numpy matrix of data. [observations, voxels]
Returns
-------
pred_response: predict response from all channels. Used
to predict feature (e.g. direction).
"""
pred_response = np.matmul(self.channels_.transpose(),
self._predict_channel_responses(X))
return pred_response
def _predict_features(self, X):
"""Predicts feature value (e.g. direction) from data in X.
Takes the maximum of the 'reconstructed' or predicted response
function.
Parameters
---------
X: numpy matrix of data. [observations, voxels]
Returns
-------
pred_features: predicted feature from response across all
channels.
"""
pred_response = self._predict_feature_responses(X)
feature_ind = np.argmax(pred_response, 0)
pred_features = self.channel_domain[feature_ind]
return pred_features
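# ---------------------------------------------------------------------------
# Hedged end-to-end sketch (added; not part of the original module). It
# simulates voxel data from the generative model B = W*C and checks that
# fit()/score() recover it. It must run inside the package (score() uses the
# module's circ_dist import) and it leans on private helpers, which is fine
# for a demo but is not a public API guarantee.
def _demo_inverted_encoding_1d():
    rng = np.random.RandomState(0)
    n_trials, n_voxels = 120, 40
    y = rng.uniform(0., 179., n_trials)        # feature labels in degrees
    iem = InvertedEncoding1D(n_channels=6, channel_exp=5)
    iem.channels_, _ = iem._define_channels()  # needed before building C
    C = iem._define_trial_activations(y)       # [trials, channels]
    W = rng.randn(n_voxels, iem.n_channels)    # hypothetical voxel weights
    X = C @ W.T + 0.05 * rng.randn(n_trials, n_voxels)  # B = W*C plus noise
    iem.fit(X, y)
    print("R^2:", iem.score(X, y))             # close to 1 at this noise level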
class InvertedEncoding2D(BaseEstimator):
"""Basis function-based reconstruction method
Inverted encoding models (alternatively known as forward models) are used
to reconstruct a feature represented in a N-dimensional space, here 2D,
(e.g. position on a projector screen) from patterns across voxels in
functional data. The model uses some number of idealized basis functions
that cover the 2D space, and assumes that the transformation from
stimulus feature (e.g. 2D spatial position) to basis function is one-
to-one and invertible. The response of a voxel is expressed as the
weighted sum of basis functions. In this implementation, basis functions
were half-wave rectified sinusoid functions raised to some power (set by
the user).
The documentation will refer to the 'stimulus space' or 'stimulus domain',
which should be a 2D space in consistent units (e.g. screen pixels,
or degrees visual angle). The stimulus space is the domain in which the
    stimulus is reconstructed. We will refer to each point in this 2D
stimulus domain as a 'pixel'.
The model:
Inverted encoding models reconstruct a stimulus feature from
patterns of BOLD activity by relating the activity in each
voxel, B, to the values of hypothetical channels (or basis
functions), C, according to Equation 1 below.
(1) B = W*C
where W is a weight matrix that represents the relationship
between BOLD activity and Channels. W must be estimated from
training data; this implementation (and most described in the
literature) uses linear regression to estimate W as in Equation
2 below [note: inv() represents matrix inverse or
pseudo-inverse].
(2) W_est = B_train*inv(C_train)
The weights in W_est (short for "estimated") represent the
contributions of each channel to the response of each voxel.
Estimated channel responses can be computed given W_est and
new voxel activity represented in matrix B_exp (short for
"experiment") through inversion of Equation 1:
(3) C_est = inv(W_est)*B_exp
Given estimated channel responses, C_est, it is straightforward
to obtain the reconstructed feature value by summing over
channels multiplied by their channel responses and taking the
argmax (i.e. the feature associated with the maximum value).
Using this model:
Use fit() to estimate the weights of the basis functions given
input data (e.g. beta values from fMRI data). This function
will execute equation 2 above.
Use predict() to compute predicted stimulus values
from new functional data. This function computes estimated
channel responses, as in equation 3, then computes summed
channel output and finds the argmax (within the stimulus
feature space) associated with those responses.
Use score() to compute a measure of the error of the prediction
based on known stimuli.
Parameters
----------
stim_xlim: list of 2 floats Specifies the minimum and maximum x-values
of the area to be reconstructed. In order to be estimated properly, a
stimulus must appear at these limits. Specifying limits outside the
range of the stimuli can lead to spurious estimates.
stim_ylim: list of 2 floats Specifies the minimum and maximum y-values
of the area to be reconstructed. In order to be estimated properly, a
stimulus must appear at these limits. Specifying limits outside the
range of the stimuli can lead to spurious estimates.
stimulus_resolution: float or list of 2 floats. If a single float is
given, it will be expanded to a list (i.e. we will assume that the
reconstructed area is composed of square pixels).
stim_radius: float, or sequence of floats [n_stim], default None. If the
user does not define the design matrix of the encoding model (e.g. C
in B = W*C), it will be defined automatically on the assumption that
each observation was for a 2D circular stimulus of some radius.
chan_xlim: list of 2 floats, default None. Specifies the minimum and
maximum x-values of the channels, or basis functions.
chan_ylim: list of 2 floats, default None. Specifies the minimum and
maximum y-values of the channels, or basis functions.
channels: [n_channels, n_pixels] NumPy 2D array, default None. If None at
initialization, it can be defined with
either define_basis_functions_sqgrid() or
define_basis_functions_trigrid(), each of which tiles the given 2D
space with some grid (square or triangular/hexagonal, respectively).
Alternatively, the user can specify their own channels.
channel_exp: int, default 7. Basis function exponent. The exponent of the
        sinusoidal basis functions, which helps control their width.
Attributes
----------
channels: [n_channels, n_pixels] NumPy 2D array defining channels
W_: sklearn.linear_model containing weight matrix that relates estimated
channel responses to response data
See get_params() for the rest of the attributes.
"""
def __init__(self, stim_xlim, stim_ylim, stimulus_resolution,
stim_radius=None, chan_xlim=None, chan_ylim=None,
channels=None, channel_exp=7):
"""Defines a 2D inverted encoding model object.
While the parameters defining the domain in which to reconstruct
the stimuli are required (e.g. all `stim_*` inputs), the parameters
to define the channels (`chan*`) are optional, in case the user
wishes to define their own channels (a.k.a basis functions).
Parameters
----------
stim_xlim: sequence of 2 float values, specifying the lower & upper
limits on the horizontal axis, respectively.
stim_ylim: sequence of 2 float values, specifying the lower & upper
limits on the vertical axis, respectively.
stimulus_resolution: a float or sequence of 2 floats, specifying the
number of pixels that exist in the x- and y- directions.
stim_radius: float, default None. The radius in pixels, assuming that
the stimulus is circular. If None, the user must either define it
before running fit(), or pass in a custom C in B = W*C.
chan_xlim: sequence of 2 float values, default None. Specifies the
lower & upper limits of the channels in the horizontal axis. If
None, the user must define this before using the class functions
to create basis functions, or pass in custom-defined channels.
chan_ylim: sequence of 2 float values, default None. Specifies the
lower & upper limits of the channels in the vertical axis. If
None, the user must define this before using the class functions
to create basis functions, or pass in custom-defined channels.
        channel_exp: int, default 7. The exponent for the sinusoidal basis
            functions. It must be set before the channels are defined, unless
            the user passes in custom-defined channels.
"""
# Automatically expand stimulus_resolution if only one value is given.
# This will create a square field of view (FOV) for the
# reconstruction.
if not isinstance(stimulus_resolution, list): # make FOV square
stimulus_resolution = [stimulus_resolution, stimulus_resolution]
if (len(stim_xlim) != 2) or (len(stim_ylim) != 2):
raise ValueError("Stimulus limits should be a sequence, 2 values")
self.stim_fov = [stim_xlim, stim_ylim]
self.stim_pixels = [np.linspace(stim_xlim[0], stim_xlim[1],
stimulus_resolution[0]),
np.linspace(stim_ylim[0], stim_ylim[1],
stimulus_resolution[1])]
self.xp, self.yp = np.meshgrid(self.stim_pixels[0],
self.stim_pixels[1])
self.stim_radius_px = stim_radius
self.channels = channels
if self.channels is None:
self.n_channels = None
else:
self.n_channels = self.channels.shape[0]
        if chan_xlim is None:
            chan_xlim = stim_xlim
            logger.info("Set channel x-limits to stimulus x-limits: %s",
                        stim_xlim)
        if chan_ylim is None:
            chan_ylim = stim_ylim
            logger.info("Set channel y-limits to stimulus y-limits: %s",
                        stim_ylim)
self.channel_limits = [chan_xlim, chan_ylim]
self.channel_exp = channel_exp
self._check_params()
def _check_params(self):
if len(self.stim_fov) != 2:
raise ValueError("Stim FOV needs to have an x-list and a y-list")
elif len(self.stim_fov[0]) != 2 or len(self.stim_fov[1]) != 2:
raise ValueError("Stimulus limits should be a sequence, 2 values")
else:
if (self.stim_fov[0][0] >= self.stim_fov[0][1]) or \
(self.stim_fov[1][0] >= self.stim_fov[1][1]):
raise ValueError("Stimulus x or y limits should be ascending "
"values")
if self.xp.size != self.yp.size:
raise ValueError("xpixel grid and ypixel grid do not have same "
"number of elements")
        if self.n_channels is not None and self.channels is not None:
if self.n_channels != self.channels.shape[0]:
raise ValueError("Number of channels {} does not match the "
"defined channels: {}".
format(self.n_channels,
self.channels.shape[0]))
if self.channels.shape[1] != self.xp.size:
raise ValueError("Defined {} channels over {} pixels, but "
"stimuli are represented over {} pixels. "
"Pixels should match.".
format(self.n_channels,
self.channels.shape[1],
self.xp.size))
def fit(self, X, y, C=None):
"""Use data and feature variable labels to fit an IEM
Parameters
----------
X: numpy matrix of voxel activation data. [observations, voxels]
Should contain the beta values for each observation or
trial and each voxel of training data.
y: numpy array of response variable. [observations]
Should contain the feature for each observation in X.
C: numpy matrix of channel activations for every observation (e.g.
the design matrix C in the linear equation B = W*C), matrix size
[observations, pixels]. If None (default), this assumes that each
observation contains a 2D circular stimulus and will define the
activations with self._define_trial_activations(y).
"""
# Check that data matrix is well conditioned:
if np.linalg.cond(X) > MAX_CONDITION_CHECK:
logger.error("Data is singular.")
raise ValueError("Data matrix is nearly singular.")
if self.channels is None:
raise ValueError("Must define channels (set of basis functions).")
if X.shape[0] < self.n_channels:
logger.error("Not enough observations. Cannot calculate "
"pseudoinverse.")
raise ValueError("Fewer observations (trials) than "
"channels. Cannot compute pseudoinverse.")
# Check that the data matrix is the right size
shape_data = np.shape(X)
shape_labels = np.shape(y)
if shape_data[0] != shape_labels[0]:
raise ValueError("Mismatched data samples and label samples")
if C is None:
# Create a matrix of channel activations for every observation.
# (i.e., C1 in Brouwer & Heeger 2009.)
C = self._define_trial_activations(y)
# Solve for W in B = WC
self.W_ = X.transpose() @ np.linalg.pinv(C.transpose())
if np.linalg.cond(self.W_) > MAX_CONDITION_CHECK:
logger.error("Weight matrix is nearly singular.")
raise ValueError("Weight matrix is nearly singular.")
return self
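    # A shape-level sketch of the solve in fit() above (illustrative): with
    # X of shape [observations, voxels] and C of shape
    # [observations, channels],
    #     W_ = X.transpose() @ np.linalg.pinv(C.transpose())
    # has shape [voxels, channels] and is the least-squares solution of
    # X.transpose() = W_ @ C.transpose().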
def predict(self, X):
"""Use test data to predict the feature
Parameters
----------
X: numpy matrix of voxel activation from test trials [observations,
voxels]. Used to predict feature associated with the given
observation.
Returns
-------
model_prediction: numpy array of estimated feature values.
"""
# Check that the data matrix is the right size
shape_data = np.shape(X)
if len(shape_data) != 2:
raise ValueError("Data matrix has too many or too few "
"dimensions.")
model_prediction = self._predict_features(X)
return model_prediction
def score(self, X, y):
"""Calculate error measure of prediction, assuming that the predicted
feature is at the maximum of the reconstructed values.
To score the reconstructions against expected features defined in the
stimulus domain (i.e. in pixels), see score_against_reconstructed().
Parameters
----------
X: numpy matrix of voxel activation from new data
[observations,voxels]
y: numpy array of stimulus features. [observations, 2]
Returns
-------
        score_value: an R^2-like score (1 - SS_res/SS_tot) comparing the
            actual and predicted features, one value per observation
            [observations].
"""
pred_features = self.predict(X)
ssres = np.sum((pred_features - y) ** 2, axis=1)
sstot = np.sum((y - np.mean(y)) ** 2, axis=1)
score_value = 1 - (ssres / sstot)
return score_value
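    # Worked example for score() (illustrative numbers): for a single
    # observation with y = [[1., 2.]] predicted as [[1., 1.]], ssres = 1.0
    # and sstot = 0.5 (each value deviates by 0.5 from the mean, 1.5), so
    # the score is 1 - 1.0/0.5 = -1.0. Perfect predictions score 1.0, and
    # scores can be negative.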
def score_against_reconstructed(self, X, y, metric="euclidean"):
"""Calculates a distance metric between reconstructed features in
the 2D stimulus domain (i.e. reconstructions in pixels) given
some observations X, and expected features y. Expected features must
also be in the pixel stimulus domain.
To score the reconstructions against the expected maxima, see score().
Parameters
----------
X: numpy matrix of voxel activation from new data
[observations, voxels]
y: numpy array of the expected stimulus reconstruction values [pixels,
observations].
metric: string specifying the distance metric, either "euclidean" or
"cosine".
Returns
-------
        score_value: the error measurement between the reconstructed feature
            values and the expected values, [observations].
"""
yhat = self.predict_feature_responses(X)
if metric == "euclidean":
score_value = euclidean_distances(y.T, yhat.T)
elif metric == "cosine":
score_value = cosine_distances(y.T, yhat.T)
return score_value[0, :]
def get_params(self, deep: bool = True):
"""Returns model parameters.
Returns
-------
        params: dict of the parameters of this object
"""
return {"n_channels": self.n_channels, "channel_exp": self.channel_exp,
"stim_fov": self.stim_fov, "stim_pixels": self.stim_pixels,
"stim_radius_px": self.stim_radius_px, "xp": self.xp,
"yp": self.yp, "channels": self.channels, "channel_limits":
self.channel_limits}
def set_params(self, **parameters):
"""Sets model parameters after initialization.
Parameters
----------
        parameters: dict mapping parameter names to their new values
"""
for parameter, value in parameters.items():
setattr(self, parameter, value)
self._check_params()
return self
def _make_2d_cosine(self, x, y, x_center, y_center, s):
"""Defines a 2D exponentiated cosine (isometric, e.g. constant width
in x & y) for use as a basis function. Function goes to zero at the
given size constant s. That is, the function is given by if r <= s:
f(r) = (0.5 + 0.5*cos(r*pi/s)))**channel_exp else: 0 where r is
the Euclidean distance from the center of the function. This will
yield a Gaussian-like function, centered at (x_center, y_center).
Parameters
----------
x: x-coordinates of the stimulus space, [npixels, 1] matrix
y: y-coordinates of the stimulus space, [npixels, 1] matrix
x_center: x-coordinate of basis function centers (sequence, nchannels)
y_center: y-coordinate of basis function centers (sequence, nchannels)
s: size constant of the 2D cosine function. This is the radius where
the function is non-zero.
Returns
-------
cos_functions: basis functions defined in the 2D stimulus space.
returns a [nchannels, npixels] matrix.
"""
cos_functions = np.zeros((len(x_center), len(x)))
for i in range(len(x_center)):
myr = np.sqrt((x - x_center[i]) ** 2 + (y - y_center[i]) ** 2). \
squeeze()
qq = (myr <= s) * 1
zp = ((0.5 * (1 + np.cos(myr * np.pi / s))) ** self.channel_exp)
cos_functions[i, :] = zp * qq
return cos_functions
    def _2d_cosine_sz_to_fwhm(self, size_constant):
        """For an exponentiated 2D cosine basis function, converts the
        function's size constant to its full-width half-maximum (FWHM).
        This is the inverse of _2d_cosine_fwhm_to_sz().
        """
        fwhm = 2 * size_constant \
            * np.arccos((0.5 ** (1 / self.channel_exp) - 0.5) / 0.5) / np.pi
        return fwhm
def _2d_cosine_fwhm_to_sz(self, fwhm):
"""For an exponentiated 2D cosine basis function, converts the
full-width half-maximum (FWHM) of that function to the function's
size constant. The size constant is the variable s in the function
below:
if r <= s: f(r) = (0.5 + 0.5*cos(r*pi/s)))**channel_exp
else: 0 where r is the Euclidean distance from the center of
the function.
Parameters
----------
fwhm: a float indicating the full-width half-maximum in stimulus space
Returns
-------
sz: the size constant of the exponentiated cosine
"""
sz = (0.5 * np.pi * fwhm) / \
(np.arccos((0.5 ** (1 / self.channel_exp) - 0.5) / 0.5))
return sz
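    # Round-trip sanity check for the two conversions above (a sketch;
    # `model` is an instance of this class):
    #     sz = model._2d_cosine_fwhm_to_sz(fwhm)
    #     assert np.isclose(model._2d_cosine_sz_to_fwhm(sz), fwhm)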
def define_basis_functions_sqgrid(self, nchannels, channel_size=None):
"""Define basis functions (aka channels) arrange in a square grid.
Sets the self.channels parameter.
Parameters
----------
        nchannels: int or list of 2 ints. The number of channels in the x
            (horizontal) and y (vertical) directions; a single value is
            expanded to both directions.
        channel_size: the desired full-width half-maximum (FWHM) of each
            channel, in stimulus space.
Returns
-------
self.channels: defines channels, a [nchannels, npixels] matrix.
channel_centers: numpy array of the centers of each channel, given as
[nchannels x 2] matrix
"""
# If given a single value for nchannels, expand to make a square
if not isinstance(nchannels, list):
nchannels = [nchannels, nchannels]
chan_xcenters = np.linspace(self.channel_limits[0][0],
self.channel_limits[0][1], nchannels[0])
chan_ycenters = np.linspace(self.channel_limits[1][0],
self.channel_limits[1][1], nchannels[1])
cx, cy = np.meshgrid(chan_xcenters, chan_ycenters)
cx = cx.reshape(-1, 1)
cy = cy.reshape(-1, 1)
if channel_size is None:
# To get even coverage, setting the channel FWHM to ~1.1x-1.2x the
# spacing between the channels might work. (See Sprague et al. 2013
# Methods & Supplementary Figure 3 -- this is for cosine exp = 7,
# your mileage may vary for other exponents!).
channel_size = 1.2 * (chan_xcenters[1] - chan_xcenters[0])
cos_width = self._2d_cosine_fwhm_to_sz(channel_size)
# define exponentiated function
self.channels = self._make_2d_cosine(self.xp.reshape(-1, 1),
self.yp.reshape(-1, 1), cx, cy,
cos_width)
self.n_channels = self.channels.shape[0]
return self.channels, np.hstack([cx, cy])
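    # Example (hypothetical numbers): with a 100 x 100 pixel FOV,
    #     channels, centers = model.define_basis_functions_sqgrid(6)
    # yields `channels` of shape [36, 10000] and `centers` of shape [36, 2].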
    def define_basis_functions_trigrid(self, grid_radius, channel_size=None):
        """Define basis functions (aka channels) arranged in a triangular
        grid. Sets the self.channels parameter.
        Parameters
        ----------
        grid_radius: number controlling the channel spacing; the horizontal
            spacing between channel centers is the x-range of the channel
            limits divided by (2 * grid_radius).
        channel_size: the desired full-width half-maximum (FWHM) of each
            channel, in stimulus space.
        Returns
        -------
        self.channels: defines channels, [nchannels, npixels] matrix.
        channel_centers: numpy array of the centers of each channel
        """
x_dist = np.diff(self.channel_limits[0]) / (grid_radius * 2)
y_dist = x_dist * np.sqrt(3) * 0.5
trigrid = np.zeros((0, 2))
xbase = np.expand_dims(np.arange(self.channel_limits[0][0],
self.channel_limits[0][1],
x_dist), 1)
for yi, y in enumerate(np.arange(self.channel_limits[1][0],
self.channel_limits[1][1], y_dist)):
if (yi % 2) == 0:
xx = xbase.copy()
yy = np.ones((xx.size, 1)) * y
else:
xx = xbase.copy() + x_dist / 2
yy = np.ones((xx.size, 1)) * y
trigrid = np.vstack(
(trigrid, np.hstack((xx, yy))))
if channel_size is None:
# To get even coverage, setting the channel FWHM to ~1.1x-1.2x the
# spacing between the channels might work. (See Sprague et al. 2013
# Methods & Supplementary Figure 3 -- this is for cosine exp = 7,
# your mileage may vary for other exponents!).
channel_size = 1.1 * x_dist
cos_width = self._2d_cosine_fwhm_to_sz(channel_size)
self.channels = self._make_2d_cosine(self.xp.reshape(-1, 1),
self.yp.reshape(-1, 1),
trigrid[:, 0],
trigrid[:, 1], cos_width)
self.n_channels = self.channels.shape[0]
return self.channels, trigrid
def _define_trial_activations(self, stim_centers, stim_radius=None):
"""Defines a numpy matrix of predicted channel responses for each
trial/observation. Assumes that the presented stimulus is circular in
the 2D stimulus space. This can effectively be a single circular
pixel if stim_radius=0.5.
        Parameters
        ----------
        stim_centers: numpy array of 2D stimulus features for each
            observation, expected dimensions are [observations, 2].
        stim_radius: scalar value or array-like specifying the radius of the
            circular stimulus for each observation, [observations]. While
            this can be read out from the property self.stim_radius_px,
            here the user can specify it in case they are retraining the
            model with new observations.
Returns
-------
C: numpy array of predicted channel responses [observations, pixels]
"""
nstim = stim_centers.shape[0]
if self.stim_radius_px is None:
if stim_radius is None:
raise ValueError("No defined stimulus radius. Please set.")
else:
self.stim_radius_px = stim_radius
        if not isinstance(self.stim_radius_px, (np.ndarray, list)):
            self.stim_radius_px = np.ones(nstim) * self.stim_radius_px
# Create a mask for every stimulus observation in the stimulus domain
stimulus_mask = np.zeros((self.xp.size, nstim))
for i in range(nstim):
            # Squared distance from the stimulus center; compare against the
            # squared radius so both sides are in the same units
            rad_vals = ((self.xp.reshape(-1, 1) - stim_centers[i, 0]) ** 2 +
                        (self.yp.reshape(-1, 1) - stim_centers[i, 1]) ** 2)
            inds = np.where(rad_vals < self.stim_radius_px[i] ** 2)[0]
            stimulus_mask[inds, i] = 1
# Go from the stimulus domain to the channel domain
C = self.channels.squeeze() @ stimulus_mask
C = C.transpose()
# Check that C is full rank
if np.linalg.matrix_rank(C) < self.n_channels:
warnings.warn("Stimulus matrix is {}, not full rank. May cause "
"issues with stimulus prediction/reconstruction.".
format(np.linalg.matrix_rank(C)), RuntimeWarning)
return C
def _predict_channel_responses(self, X):
"""Computes predicted channel responses from data
(e.g. C2 in Brouwer & Heeger 2009)
Parameters
----------
X: numpy data matrix. [observations, voxels]
Returns
-------
channel_response: numpy matrix of channel responses. [channels,
observations]
"""
channel_response = np.matmul(np.linalg.pinv(self.W_), X.transpose())
return channel_response
def predict_feature_responses(self, X):
"""Takes channel weights and transforms them into continuous
functions defined in the feature domain.
Parameters
---------
X: numpy matrix of data. [observations, voxels]
Returns
-------
        pred_response: predicted response from all channels. This is the
            stimulus reconstruction in the stimulus (pixel) domain.
            [pixels, observations]
"""
pred_response = np.matmul(self.channels.transpose(),
self._predict_channel_responses(X))
return pred_response
def _predict_features(self, X):
"""Predicts feature value from data in X.
Takes the maximum of the reconstructed, i.e. predicted response
function.
Parameters
---------
X: numpy matrix of data. [observations, voxels]
Returns
-------
pred_features: numpy matrix of predicted stimulus features.
[observations, 2]
"""
pred_response = self.predict_feature_responses(X)
feature_ind = np.argmax(pred_response, 0)
pred_features = np.hstack((self.xp.reshape(-1, 1)[feature_ind],
self.yp.reshape(-1, 1)[feature_ind]))
return pred_features
|
brainiak/brainiak
|
brainiak/reconstruct/iem.py
|
Python
|
apache-2.0
| 45,193
|
[
"Gaussian"
] |
378158d4d099bdb90d6f96ff533844ef4cf78d084ca57059fa3a37f06f5d7079
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import map
from builtins import zip
from builtins import range
import os
import operator
from math import log
from Bio import motifs
import numpy as np
def get_motif_bg_freq(meme_file):
"""Read backgroud frequencies as determined by MEME
Biopython does not support reading background frequencies from MEME file
This method implements the missing feature.
Parameters
----------
meme_file: str
Location of MEME file
Returns
-------
    bg_frequencies: dict
        A dict with bases as keys and corresponding frequencies as values
"""
frequencies_line = None
with open(meme_file, 'r') as f:
content = [line.rstrip('\n') for line in f]
for index, line in enumerate(content):
if line.find('Background letter frequencies (from')!=-1:
frequencies_line = content[index+1].strip()
break
assert frequencies_line is not None
frequencies = frequencies_line.split(' ')
values = [float(x) for x in frequencies[1:][::2]]
keys = frequencies[0:][::2]
bg_frequencies = {k:v for k,v in zip(keys, values)}
return bg_frequencies
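# Sketch of the MEME lines parsed above and the resulting dict (frequency
# values are illustrative):
#     Background letter frequencies (from dataset):
#     A 0.303 C 0.183 G 0.209 T 0.306
# -> {'A': 0.303, 'C': 0.183, 'G': 0.209, 'T': 0.306}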
def get_total_sequences(meme_file):
"""Get total sequences
Parameters
----------
meme_file: str
Location of MEME file
Returns
-------
num_seq: int
Number of sequences that meme was used on
"""
seq_line = None
count = 0
"""
with open(meme_file, 'rb') as f:
for line in f:
if line.strip().startswith(b'TRAINING SET'):
star_line = f.next().strip()
datafile_line = f.next().strip()
alphabet_line = f.next().strip()
header_line = f.next().strip()
dashed_line = f.next().strip()
count = 0
while not f.next().strip().startswith('*********'):
count+=1
"""
with open(meme_file, 'r') as f:
content = [line.rstrip('\n') for line in f]
for index, line in enumerate(content):
if line.find('TRAINING SET')==0:
start_index = index
break
lines = content[start_index+6:]
count = 0
for line in lines:
if line.find('*********')==0:
break
else:
count+=1
    # MEME's TRAINING SET table lists two sequences per line, so the line
    # count is doubled
    return count * 2
#TODO Rename this!
def read_memefile(meme_file):
    """Summarise a MEME file.
    Parameters
    ----------
    meme_file: str
        Location of MEME file
    Returns
    -------
    summary: dict
        A summary containing the following details:
            - total_motifs: number of motif records in the file
            - motif_occurrences: dict containing the number of times each
              motif occurs, indexed by key: 'motif1', 'motif2', etc.
            - motif_records: list of Biopython motif objects
            - bg_frequencies: dict of background letter frequencies
    """
summary = {}
summary['motif_occurrences'] = {}
    with open(os.path.abspath(meme_file)) as handle:
        records = motifs.parse(handle, 'meme')
summary['total_motifs'] = len(records)
num_occurrences = []
for index, record in enumerate(records):
        # Default to 0 when a record lacks a num_occurrences attribute
        num_occurrences.append(int(getattr(record, 'num_occurrences', 0)))
    sorted_occurrences = sorted(enumerate(num_occurrences), key=lambda x: x[1])
    summary['motif_occurrences'] = {'motif{}'.format(index+1): value
                                    for index, value in sorted_occurrences}
summary['motif_records'] = records
    # Read background frequencies since Biopython does not support them
bg_frequencies = get_motif_bg_freq(meme_file)
summary['bg_frequencies'] = bg_frequencies
return summary
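# Usage sketch for read_memefile() (the path is hypothetical):
#     summary = read_memefile('meme_out/meme.txt')
#     print(summary['total_motifs'], summary['bg_frequencies'])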
def create_position_profile(record, count_type='pwm'):
"""Find bases with minimum or maximum count type"""
bases = ['A', 'C','T','G']
base_profile = getattr(record, count_type)
base_A = np.array(base_profile['A'])
base_C = np.array(base_profile['C'])
base_T = np.array(base_profile['T'])
base_G = np.array(base_profile['G'])
profile = np.vstack((base_A,base_C,base_T,base_G))
position_profile = []
for col in range(0, record.length):
p_profile = profile[:,col]
position_profile.append({k:v for k,v in zip(bases,p_profile)})
return position_profile
def get_max_occuring_bases(record, max_count, count_type='counts'):
"""Find bases with maximum frequency at each position of motif record
Given a motif record, find at each position, the base with maximum
frequency and it's frequency
"""
profile = create_position_profile(record, count_type)
sorted_profile = []
for i, p in enumerate(profile):
sorted_p = sorted(list(p.items()), key=operator.itemgetter(1))
sorted_profile.append(sorted_p[-max_count:])
return sorted_profile
def position_wise_profile(counts_dict, length):
"""
Convert base to position wise profile
"""
profile = list(map(dict, list(zip(*[[(k, v) for v in value] for k, value in list(counts_dict.items())]))))
return profile
def find_max_occurence(profile, max_count=2):
"""
Return profile with base corresponding to max scores[CHECK!]
"""
sorted_profile = []
for p in profile:
sorted_profile.append(sorted(list(p.items()), key=lambda x:x[1]))
for i,p in enumerate(sorted_profile):
sorted_profile[i] = p[-max_count:]
return sorted_profile
def get_motif_ic(meme_file, n_motif):
"""Get position wise infomartion content of motif
Parameters
----------
motif: motif
A Biopython motif record
Returns
-------
motif_ic: array
An array of information content at each base position
"""
bases = ['A', 'C', 'T', 'G']
motif_ic = []
meme_summary = read_memefile(meme_file)
motif = meme_summary['motif_records'][n_motif]
bg_frequencies = meme_summary['bg_frequencies']
    log_odds = motif.pwm.log_odds()  # bg_frequencies could be passed as background
pwm = motif.pwm
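    # The loop below computes the position-wise information content
    #     IC_i = sum_b p_(b,i) * (log2(p_(b,i)) - log2(q_b))
    # where p_(b,i) is the PWM probability of base b at position i and q_b
    # is the background frequency of base b.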
for i in range(0, motif.length):
s = 0
for base in bases:
p = pwm[base][i]
if p<=0:
s1 = 0
else:
s1 = log(p,2)
s2 = log(bg_frequencies[base],2)
s = s + p*(s1-s2)
motif_ic.append(s)
return motif_ic
|
saketkc/moca
|
moca/helpers/meme.py
|
Python
|
isc
| 6,268
|
[
"Biopython"
] |
da195e6ad2fb32aa21839a5d5a4beb9e524bb98e1b8b5a8a387a4da71aab7a7a
|
# -*- coding: utf-8 -*-
import pytest
from csirtg_indicator.indicator import Indicator
from pprint import pprint
@pytest.fixture
def indicator():
i = {
'indicator': "http://refreshdharan.com/bg/excel2/index.php?userid={dong.keonkwonfinancialconsultd@yahoo.com}",
'itype': 'fqdn',
'provider': "me.com",
'tlp': "amber",
'confidence': "85",
'reporttime': '2015-01-01T00:00:00Z'
}
return Indicator(**i)
def test_format_keys(indicator):
    i = indicator.format_keys()
assert i.indicator == indicator.indicator
if __name__ == '__main__':
test_format_keys()
|
csirtgadgets/csirtg-indicator-py
|
test/format/test_format_keys.py
|
Python
|
mpl-2.0
| 653
|
[
"Amber"
] |
440e3ed903a93a35471e548682cbbe8c1e236654e9bd275822c6a90d6e4be4c6
|
#Copyright (C) 2013 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module contains functions for reading, generating, and segmenting strain data.
"""
import copy
import logging, numpy
import pycbc.noise
import pycbc.types
from pycbc.types import TimeSeries, zeros
from pycbc.types import Array, FrequencySeries, complex_same_precision_as
from pycbc.types import MultiDetOptionAppendAction, MultiDetOptionAction
from pycbc.types import MultiDetOptionActionSpecial
from pycbc.types import required_opts, required_opts_multi_ifo
from pycbc.types import ensure_one_opt, ensure_one_opt_multi_ifo
from pycbc.types import copy_opts_for_single_ifo
from pycbc.inject import InjectionSet, SGBurstInjectionSet
from pycbc.filter import resample_to_delta_t, highpass, make_frequency_series
from pycbc.filter.zpk import filter_zpk
from pycbc.waveform.spa_tmplt import spa_distance
import pycbc.psd
import pycbc.fft
import pycbc.events
import pycbc.frame
import pycbc.filter
from scipy.signal import kaiserord
def next_power_of_2(n):
"""Return the smallest integer power of 2 larger than the argument.
Parameters
----------
n : int
A positive integer.
Returns
-------
m : int
Smallest integer power of 2 larger than n.
"""
return 1 << n.bit_length()
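# Examples: next_power_of_2(5) == 8 and next_power_of_2(8) == 16; the result
# is always strictly larger than the argument.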
def detect_loud_glitches(strain, psd_duration=4., psd_stride=2.,
psd_avg_method='median', low_freq_cutoff=30.,
threshold=50., cluster_window=5., corrupt_time=4.,
high_freq_cutoff=None, output_intermediates=False):
"""Automatic identification of loud transients for gating purposes.
This function first estimates the PSD of the input time series using the
FindChirp Welch method. Then it whitens the time series using that
estimate. Finally, it computes the magnitude of the whitened series,
thresholds it and applies the FindChirp clustering over time to the
surviving samples.
Parameters
----------
strain : TimeSeries
Input strain time series to detect glitches over.
psd_duration : {float, 4}
Duration of the segments for PSD estimation in seconds.
psd_stride : {float, 2}
Separation between PSD estimation segments in seconds.
psd_avg_method : {string, 'median'}
Method for averaging PSD estimation segments.
low_freq_cutoff : {float, 30}
Minimum frequency to include in the whitened strain.
threshold : {float, 50}
Minimum magnitude of whitened strain for considering a transient to
be present.
cluster_window : {float, 5}
Length of time window to cluster surviving samples over, in seconds.
corrupt_time : {float, 4}
Amount of time to be discarded at the beginning and end of the input
time series.
    high_freq_cutoff : {float, None}
Maximum frequency to include in the whitened strain. If given, the
input series is downsampled accordingly. If omitted, the Nyquist
frequency is used.
    output_intermediates : {bool, False}
        Save intermediate time series for debugging.
    Returns
    -------
    times : list
        GPS times of the surviving (clustered) loud transients.
    """
# don't waste time trying to optimize a single FFT
pycbc.fft.fftw.set_measure_level(0)
if high_freq_cutoff:
strain = resample_to_delta_t(strain, 0.5 / high_freq_cutoff,
method='ldas')
else:
strain = strain.copy()
# taper strain
corrupt_length = int(corrupt_time * strain.sample_rate)
w = numpy.arange(corrupt_length) / float(corrupt_length)
strain[0:corrupt_length] *= pycbc.types.Array(w, dtype=strain.dtype)
strain[(len(strain) - corrupt_length):] *= \
pycbc.types.Array(w[::-1], dtype=strain.dtype)
if output_intermediates:
strain.save_to_wav('strain_conditioned.wav')
# zero-pad strain to a power-of-2 length
strain_pad_length = next_power_of_2(len(strain))
pad_start = int(strain_pad_length / 2 - len(strain) / 2)
pad_end = pad_start + len(strain)
pad_epoch = strain.start_time - pad_start / float(strain.sample_rate)
strain_pad = pycbc.types.TimeSeries(
pycbc.types.zeros(strain_pad_length, dtype=strain.dtype),
delta_t=strain.delta_t, copy=False, epoch=pad_epoch)
strain_pad[pad_start:pad_end] = strain[:]
# estimate the PSD
psd = pycbc.psd.welch(strain[corrupt_length:(len(strain)-corrupt_length)],
seg_len=int(psd_duration * strain.sample_rate),
seg_stride=int(psd_stride * strain.sample_rate),
avg_method=psd_avg_method,
require_exact_data_fit=False)
psd = pycbc.psd.interpolate(psd, 1. / strain_pad.duration)
psd = pycbc.psd.inverse_spectrum_truncation(
psd, int(psd_duration * strain.sample_rate),
low_frequency_cutoff=low_freq_cutoff,
trunc_method='hann')
kmin = int(low_freq_cutoff / psd.delta_f)
psd[0:kmin] = numpy.inf
if high_freq_cutoff:
kmax = int(high_freq_cutoff / psd.delta_f)
psd[kmax:] = numpy.inf
# whiten
strain_tilde = strain_pad.to_frequencyseries()
if high_freq_cutoff:
norm = high_freq_cutoff - low_freq_cutoff
else:
norm = strain.sample_rate / 2. - low_freq_cutoff
strain_tilde *= (psd * norm) ** (-0.5)
strain_pad = strain_tilde.to_timeseries()
if output_intermediates:
strain_pad[pad_start:pad_end].save_to_wav('strain_whitened.wav')
mag = abs(strain_pad[pad_start:pad_end])
if output_intermediates:
mag.save('strain_whitened_mag.npy')
mag = mag.numpy()
# remove strain corrupted by filters at the ends
mag[0:corrupt_length] = 0
mag[-1:-corrupt_length-1:-1] = 0
# find peaks and their times
indices = numpy.where(mag > threshold)[0]
cluster_idx = pycbc.events.findchirp_cluster_over_window(
indices, numpy.array(mag[indices]),
int(cluster_window*strain.sample_rate))
times = [idx * strain.delta_t + strain.start_time \
for idx in indices[cluster_idx]]
pycbc.fft.fftw.set_measure_level(pycbc.fft.fftw._default_measurelvl)
return times
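# Usage sketch (`strain` is a hypothetical TimeSeries; the keyword values
# shown are the defaults):
#     glitch_times = detect_loud_glitches(strain, threshold=50.,
#                                         cluster_window=5.)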
def from_cli(opt, dyn_range_fac=1, precision='single',
inj_filter_rejector=None):
"""Parses the CLI options related to strain data reading and conditioning.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (gps-start-time, gps-end-time, strain-high-pass,
pad-data, sample-rate, (frame-cache or frame-files), channel-name,
fake-strain, fake-strain-seed, fake-strain-from-file, gating_file).
dyn_range_fac : {float, 1}, optional
A large constant to reduce the dynamic range of the strain.
precision : string
Precision of the returned strain ('single' or 'double').
inj_filter_rejector : InjFilterRejector instance; optional, default=None
If given send the InjFilterRejector instance to the inject module so
that it can store a reduced representation of injections if
necessary.
Returns
-------
strain : TimeSeries
The time series containing the conditioned strain data.
"""
gating_info = {}
injector = InjectionSet.from_cli(opt)
if opt.frame_cache or opt.frame_files or opt.frame_type or opt.hdf_store:
if opt.frame_cache:
frame_source = opt.frame_cache
if opt.frame_files:
frame_source = opt.frame_files
logging.info("Reading Frames")
if hasattr(opt, 'frame_sieve') and opt.frame_sieve:
sieve = opt.frame_sieve
else:
sieve = None
if opt.frame_type:
strain = pycbc.frame.query_and_read_frame(
opt.frame_type, opt.channel_name,
start_time=opt.gps_start_time-opt.pad_data,
end_time=opt.gps_end_time+opt.pad_data,
sieve=sieve)
elif opt.frame_files or opt.frame_cache:
strain = pycbc.frame.read_frame(
frame_source, opt.channel_name,
start_time=opt.gps_start_time-opt.pad_data,
end_time=opt.gps_end_time+opt.pad_data,
sieve=sieve)
elif opt.hdf_store:
strain = pycbc.frame.read_store(opt.hdf_store, opt.channel_name,
opt.gps_start_time - opt.pad_data,
opt.gps_end_time + opt.pad_data)
if opt.zpk_z and opt.zpk_p and opt.zpk_k:
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
logging.info("Applying zpk filter")
z = numpy.array(opt.zpk_z)
p = numpy.array(opt.zpk_p)
k = float(opt.zpk_k)
strain = filter_zpk(strain.astype(numpy.float64), z, p, k)
if opt.normalize_strain:
logging.info("Dividing strain by constant")
        strain = strain / opt.normalize_strain
if injector is not None:
logging.info("Applying injections")
injections = \
injector.apply(strain, opt.channel_name[0:2],
distance_scale=opt.injection_scale_factor,
inj_filter_rejector=inj_filter_rejector)
if opt.sgburst_injection_file:
logging.info("Applying sine-Gaussian burst injections")
injector = SGBurstInjectionSet(opt.sgburst_injection_file)
injector.apply(strain, opt.channel_name[0:2],
distance_scale=opt.injection_scale_factor)
if opt.strain_high_pass:
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
if precision == 'single':
logging.info("Converting to float32")
strain = (strain * dyn_range_fac).astype(pycbc.types.float32)
elif precision == "double":
logging.info("Converting to float64")
strain = (strain * dyn_range_fac).astype(pycbc.types.float64)
else:
raise ValueError("Unrecognized precision {}".format(precision))
if opt.sample_rate:
logging.info("Resampling data")
strain = resample_to_delta_t(strain,
1. / opt.sample_rate,
method='ldas')
if opt.gating_file is not None:
logging.info("Gating times contained in gating file")
gate_params = numpy.loadtxt(opt.gating_file)
if len(gate_params.shape) == 1:
gate_params = [gate_params]
strain = gate_data(strain, gate_params)
gating_info['file'] = \
[gp for gp in gate_params \
if (gp[0] + gp[1] + gp[2] >= strain.start_time) \
and (gp[0] - gp[1] - gp[2] <= strain.end_time)]
if opt.autogating_threshold is not None:
gating_info['auto'] = []
for _ in range(opt.autogating_max_iterations):
glitch_times = detect_loud_glitches(
strain, threshold=opt.autogating_threshold,
cluster_window=opt.autogating_cluster,
low_freq_cutoff=opt.strain_high_pass,
corrupt_time=opt.pad_data + opt.autogating_pad)
gate_params = [[gt, opt.autogating_width, opt.autogating_taper]
for gt in glitch_times]
gating_info['auto'] += gate_params
strain = gate_data(strain, gate_params)
if len(glitch_times) > 0:
logging.info('Autogating at %s',
', '.join(['%.3f' % gt
for gt in glitch_times]))
else:
break
if opt.strain_high_pass:
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
if hasattr(opt, 'witness_frame_type') and opt.witness_frame_type:
stilde = strain.to_frequencyseries()
import h5py
        tf_file = h5py.File(opt.witness_tf_file, 'r')
for key in tf_file:
witness = pycbc.frame.query_and_read_frame(opt.witness_frame_type, str(key),
start_time=strain.start_time, end_time=strain.end_time)
witness = (witness * dyn_range_fac).astype(strain.dtype)
tf = pycbc.types.load_frequencyseries(opt.witness_tf_file, group=key)
tf = tf.astype(stilde.dtype)
flen = int(opt.witness_filter_length * strain.sample_rate)
tf = pycbc.psd.interpolate(tf, stilde.delta_f)
tf_time = tf.to_timeseries()
window = Array(numpy.hanning(flen * 2), dtype=strain.dtype)
tf_time[0:flen] *= window[flen:]
tf_time[len(tf_time)-flen:] *= window[0:flen]
tf = tf_time.to_frequencyseries()
kmax = min(len(tf), len(stilde) - 1)
stilde[:kmax] -= tf[:kmax] * witness.to_frequencyseries()[:kmax]
strain = stilde.to_timeseries()
if opt.pad_data:
logging.info("Remove Padding")
start = int(opt.pad_data * strain.sample_rate)
end = int(len(strain) - strain.sample_rate * opt.pad_data)
strain = strain[start:end]
if opt.fake_strain or opt.fake_strain_from_file:
logging.info("Generating Fake Strain")
if not opt.low_frequency_cutoff:
raise ValueError('Please provide low frequency cutoff to '
'generate a fake strain')
duration = opt.gps_end_time - opt.gps_start_time
pdf = 1. / 128
plen = int(opt.sample_rate / pdf) // 2 + 1
if opt.fake_strain_from_file:
logging.info("Reading ASD from file")
strain_psd = pycbc.psd.from_txt(opt.fake_strain_from_file, plen, pdf,
opt.low_frequency_cutoff, is_asd_file=True)
elif opt.fake_strain != 'zeroNoise':
logging.info("Making PSD for strain")
strain_psd = pycbc.psd.from_string(opt.fake_strain, plen, pdf,
opt.low_frequency_cutoff)
if opt.fake_strain == 'zeroNoise':
logging.info("Making zero-noise time series")
strain = TimeSeries(pycbc.types.zeros(duration * 16384),
delta_t=1. / 16384,
epoch=opt.gps_start_time)
else:
logging.info("Making colored noise")
from pycbc.noise.reproduceable import colored_noise
lowfreq = opt.low_frequency_cutoff / 2.
strain = colored_noise(strain_psd, opt.gps_start_time,
opt.gps_end_time,
seed=opt.fake_strain_seed,
low_frequency_cutoff=lowfreq)
if not opt.channel_name and (opt.injection_file \
or opt.sgburst_injection_file):
raise ValueError('Please provide channel names with the format '
'ifo:channel (e.g. H1:CALIB-STRAIN) to inject '
'simulated signals into fake strain')
if injector is not None:
logging.info("Applying injections")
injections = \
injector.apply(strain, opt.channel_name[0:2],
distance_scale=opt.injection_scale_factor,
inj_filter_rejector=inj_filter_rejector)
if opt.sgburst_injection_file:
logging.info("Applying sine-Gaussian burst injections")
injector = SGBurstInjectionSet(opt.sgburst_injection_file)
injector.apply(strain, opt.channel_name[0:2],
distance_scale=opt.injection_scale_factor)
if opt.strain_high_pass:
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
logging.info("Resampling data")
strain = resample_to_delta_t(strain, 1. / opt.sample_rate)
if precision == 'single':
logging.info("Converting to float32")
strain = (dyn_range_fac * strain).astype(pycbc.types.float32)
elif precision == 'double':
logging.info("Converting to float64")
strain = (dyn_range_fac * strain).astype(pycbc.types.float64)
else:
raise ValueError("Unrecognized precision {}".format(precision))
if opt.strain_high_pass:
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
if opt.taper_data:
logging.info("Tapering data")
# Use auto-gating, a one-sided gate is a taper
pd_taper_window = opt.taper_data
gate_params = [(strain.start_time, 0., pd_taper_window)]
gate_params.append((strain.end_time, 0., pd_taper_window))
gate_data(strain, gate_params)
if injector is not None:
strain.injections = injections
strain.gating_info = gating_info
return strain
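# Usage sketch (assuming `opt` was produced by an argument parser populated
# via insert_strain_option_group() below):
#     strain = from_cli(opt, dyn_range_fac=pycbc.DYN_RANGE_FAC)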
def from_cli_single_ifo(opt, ifo, inj_filter_rejector=None, **kwargs):
"""
Get the strain for a single ifo when using the multi-detector CLI
"""
single_det_opt = copy_opts_for_single_ifo(opt, ifo)
return from_cli(single_det_opt,
inj_filter_rejector=inj_filter_rejector, **kwargs)
def from_cli_multi_ifos(opt, ifos, inj_filter_rejector_dict=None, **kwargs):
"""
Get the strain for all ifos when using the multi-detector CLI
"""
strain = {}
if inj_filter_rejector_dict is None:
inj_filter_rejector_dict = {ifo: None for ifo in ifos}
for ifo in ifos:
strain[ifo] = from_cli_single_ifo(opt, ifo,
inj_filter_rejector_dict[ifo], **kwargs)
return strain
def insert_strain_option_group(parser, gps_times=True):
""" Add strain-related options to the optparser object.
Adds the options used to call the pycbc.strain.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
gps_times : bool, optional
Include ``--gps-start-time`` and ``--gps-end-time`` options. Default
is True.
"""
    data_reading_group = parser.add_argument_group("Options for obtaining h(t)",
                    "These options are used for generating h(t) either by "
                    "reading from a file or by generating it. This is only "
                    "needed if the PSD is to be estimated from the data, "
                    "i.e. if the --psd-estimation option is given.")
# Required options
if gps_times:
data_reading_group.add_argument("--gps-start-time",
help="The gps start time of the data "
"(integer seconds)", type=int)
data_reading_group.add_argument("--gps-end-time",
help="The gps end time of the data "
" (integer seconds)", type=int)
data_reading_group.add_argument("--strain-high-pass", type=float,
help="High pass frequency")
data_reading_group.add_argument("--pad-data", default=8,
help="Extra padding to remove highpass corruption "
"(integer seconds)", type=int)
data_reading_group.add_argument("--taper-data",
help="Taper ends of data to zero using the supplied length as a "
"window (integer seconds)", type=int, default=0)
data_reading_group.add_argument("--sample-rate", type=int,
help="The sample rate to use for h(t) generation (integer Hz).")
data_reading_group.add_argument("--channel-name", type=str,
help="The channel containing the gravitational strain data")
# Read from cache file
data_reading_group.add_argument("--frame-cache", type=str, nargs="+",
help="Cache file containing the frame locations.")
# Read from frame files
data_reading_group.add_argument("--frame-files",
type=str, nargs="+",
help="list of frame files")
# Read from hdf store file
data_reading_group.add_argument("--hdf-store",
type=str,
help="Store of time series data in hdf format")
# Use datafind to get frame files
data_reading_group.add_argument("--frame-type",
type=str,
help="(optional), replaces frame-files. Use datafind "
"to get the needed frame file(s) of this type.")
# Filter frame files by URL
data_reading_group.add_argument("--frame-sieve",
type=str,
help="(optional), Only use frame files where the "
"URL matches the regular expression given.")
# Generate gaussian noise with given psd
data_reading_group.add_argument("--fake-strain",
help="Name of model PSD for generating fake gaussian noise.",
choices=pycbc.psd.get_lalsim_psd_list() + ['zeroNoise'])
data_reading_group.add_argument("--fake-strain-seed", type=int, default=0,
help="Seed value for the generation of fake colored"
" gaussian noise")
data_reading_group.add_argument("--fake-strain-from-file",
help="File containing ASD for generating fake noise from it.")
# Injection options
data_reading_group.add_argument("--injection-file", type=str,
help="(optional) Injection file used to add "
"waveforms into the strain")
data_reading_group.add_argument("--sgburst-injection-file", type=str,
help="(optional) Injection file used to add "
"sine-Gaussian burst waveforms into the strain")
data_reading_group.add_argument("--injection-scale-factor", type=float,
default=1, help="Divide injections by this factor "
"before injecting into the data.")
data_reading_group.add_argument('--injection-f-ref', type=float,
help='Reference frequency in Hz for '
'creating CBC injections from an XML '
'file.')
data_reading_group.add_argument('--injection-f-final', type=float,
help='Override the f_final field of a CBC '
'XML injection file.')
# Gating options
data_reading_group.add_argument("--gating-file", type=str,
help="(optional) Text file of gating segments to apply."
" Format of each line is (all times in secs):"
" gps_time zeros_half_width pad_half_width")
data_reading_group.add_argument('--autogating-threshold', type=float,
metavar='SIGMA',
help='If given, find and gate glitches '
'producing a deviation larger than '
'SIGMA in the whitened strain time '
'series.')
data_reading_group.add_argument('--autogating-max-iterations', type=int,
                                    metavar='N', default=1,
                                    help='Maximum number of autogating '
                                         'iterations to apply.')
data_reading_group.add_argument('--autogating-cluster', type=float,
metavar='SECONDS', default=5.,
help='Length of clustering window for '
'detecting glitches for autogating.')
data_reading_group.add_argument('--autogating-width', type=float,
metavar='SECONDS', default=0.25,
help='Half-width of the gating window.')
data_reading_group.add_argument('--autogating-taper', type=float,
metavar='SECONDS', default=0.25,
help='Taper the strain before and after '
'each gating window over a duration '
'of SECONDS.')
data_reading_group.add_argument('--autogating-pad', type=float,
metavar='SECONDS', default=16,
help='Ignore the given length of whitened '
'strain at the ends of a segment, to '
'avoid filters ringing.')
# Optional
data_reading_group.add_argument("--normalize-strain", type=float,
help="(optional) Divide frame data by constant.")
data_reading_group.add_argument("--zpk-z", type=float, nargs="+",
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of zeros for transfer function")
data_reading_group.add_argument("--zpk-p", type=float, nargs="+",
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of poles for transfer function")
data_reading_group.add_argument("--zpk-k", type=float,
help="(optional) Zero-pole-gain (zpk) filter strain. "
"Transfer function gain")
# Options to apply to subtract noise from a witness channel and known
# transfer function.
data_reading_group.add_argument("--witness-frame-type", type=str,
help="(optional), frame type which will be use to query the"
" witness channel data.")
data_reading_group.add_argument("--witness-tf-file", type=str,
help="an hdf file containing the transfer"
" functions and the associated channel names")
data_reading_group.add_argument("--witness-filter-length", type=float,
help="filter length in seconds for the transfer function")
return data_reading_group
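# A hypothetical command line using the options defined above (channel and
# frame file names are made up):
#     --gps-start-time 1126259446 --gps-end-time 1126259478 \
#     --strain-high-pass 15 --pad-data 8 --sample-rate 4096 \
#     --channel-name H1:GWOSC-STRAIN --frame-files H-H1_DATA.gwf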
# FIXME: This repeats almost all of the options above. Any nice way of reducing
# this?
def insert_strain_option_group_multi_ifo(parser, gps_times=True):
"""
Adds the options used to call the pycbc.strain.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
gps_times : bool, optional
Include ``--gps-start-time`` and ``--gps-end-time`` options. Default
is True.
"""
data_reading_group_multi = parser.add_argument_group("Options for obtaining"
" h(t)",
"These options are used for generating h(t) either by "
"reading from a file or by generating it. This is only "
"needed if the PSD is to be estimated from the data, ie. "
"if the --psd-estimation option is given. This group "
"supports reading from multiple ifos simultaneously.")
# Required options
if gps_times:
data_reading_group_multi.add_argument(
"--gps-start-time", nargs='+', action=MultiDetOptionAction,
metavar='IFO:TIME', type=int,
help="The gps start time of the data (integer seconds)")
data_reading_group_multi.add_argument(
"--gps-end-time", nargs='+', action=MultiDetOptionAction,
metavar='IFO:TIME', type=int,
help="The gps end time of the data (integer seconds)")
data_reading_group_multi.add_argument("--strain-high-pass", nargs='+',
action=MultiDetOptionAction,
type=float, metavar='IFO:FREQUENCY',
help="High pass frequency")
data_reading_group_multi.add_argument("--pad-data", nargs='+', default=8,
action=MultiDetOptionAction,
type=int, metavar='IFO:LENGTH',
help="Extra padding to remove highpass corruption "
"(integer seconds)")
data_reading_group_multi.add_argument("--taper-data", nargs='+',
action=MultiDetOptionAction,
type=int, default=0, metavar='IFO:LENGTH',
help="Taper ends of data to zero using the "
"supplied length as a window (integer seconds)")
data_reading_group_multi.add_argument("--sample-rate", type=int, nargs='+',
action=MultiDetOptionAction, metavar='IFO:RATE',
help="The sample rate to use for h(t) generation "
" (integer Hz).")
data_reading_group_multi.add_argument("--channel-name", type=str, nargs='+',
action=MultiDetOptionActionSpecial,
metavar='IFO:CHANNEL',
help="The channel containing the gravitational "
"strain data")
# Read from cache file
data_reading_group_multi.add_argument("--frame-cache", type=str, nargs="+",
action=MultiDetOptionAppendAction,
metavar='IFO:FRAME_CACHE',
help="Cache file containing the frame locations.")
# Read from frame files
data_reading_group_multi.add_argument("--frame-files", type=str, nargs="+",
action=MultiDetOptionAppendAction,
metavar='IFO:FRAME_FILES',
help="list of frame files")
# Read from hdf store file
data_reading_group_multi.add_argument("--hdf-store", type=str, nargs='+',
action=MultiDetOptionAction,
metavar='IFO:HDF_STORE_FILE',
help="Store of time series data in hdf format")
# Use datafind to get frame files
data_reading_group_multi.add_argument("--frame-type", type=str, nargs="+",
action=MultiDetOptionAction,
metavar='IFO:FRAME_TYPE',
help="(optional) Replaces frame-files. "
"Use datafind to get the needed frame "
"file(s) of this type.")
# Filter frame files by URL
data_reading_group_multi.add_argument("--frame-sieve", type=str, nargs="+",
action=MultiDetOptionAction,
metavar='IFO:FRAME_SIEVE',
help="(optional), Only use frame files where the "
"URL matches the regular expression given.")
# Generate gaussian noise with given psd
data_reading_group_multi.add_argument("--fake-strain", type=str, nargs="+",
action=MultiDetOptionAction, metavar='IFO:CHOICE',
help="Name of model PSD for generating fake "
"gaussian noise. Choose from %s or zeroNoise" \
%((', ').join(pycbc.psd.get_lalsim_psd_list()),) )
data_reading_group_multi.add_argument("--fake-strain-seed", type=int,
default=0, nargs="+", action=MultiDetOptionAction,
metavar='IFO:SEED',
help="Seed value for the generation of fake "
"colored gaussian noise")
data_reading_group_multi.add_argument("--fake-strain-from-file", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="File containing ASD for generating fake "
"noise from it.")
# Injection options
data_reading_group_multi.add_argument("--injection-file", type=str,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FILE',
help="(optional) Injection file used to add "
"waveforms into the strain")
data_reading_group_multi.add_argument("--sgburst-injection-file", type=str,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FILE',
help="(optional) Injection file used to add "
"sine-Gaussian burst waveforms into the strain")
data_reading_group_multi.add_argument("--injection-scale-factor",
type=float, nargs="+", action=MultiDetOptionAction,
metavar="IFO:VAL", default=1.,
help="Multiple injections by this factor "
"before injecting into the data.")
data_reading_group_multi.add_argument('--injection-f-ref', type=float,
action=MultiDetOptionAction, metavar='IFO:VALUE',
help='Reference frequency in Hz for '
'creating CBC injections from an XML '
'file.')
data_reading_group_multi.add_argument('--injection-f-final', type=float,
action=MultiDetOptionAction, metavar='IFO:VALUE',
help='Override the f_final field of a CBC '
'XML injection file.')
# Gating options
data_reading_group_multi.add_argument("--gating-file", nargs="+",
action=MultiDetOptionAction,
metavar='IFO:FILE',
help='(optional) Text file of gating segments to apply.'
' Format of each line (units s) :'
' gps_time zeros_half_width pad_half_width')
data_reading_group_multi.add_argument('--autogating-threshold', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SIGMA',
help='If given, find and gate glitches '
'producing a deviation larger than '
'SIGMA in the whitened strain time '
'series.')
data_reading_group_multi.add_argument('--autogating-max-iterations', type=int,
                                          metavar='N', default=1,
                                          help='Maximum number of autogating '
                                               'iterations to apply.')
data_reading_group_multi.add_argument('--autogating-cluster', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=5.,
help='Length of clustering window for '
'detecting glitches for autogating.')
data_reading_group_multi.add_argument('--autogating-width', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=0.25,
help='Half-width of the gating window.')
data_reading_group_multi.add_argument('--autogating-taper', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=0.25,
help='Taper the strain before and after '
'each gating window over a duration '
'of SECONDS.')
data_reading_group_multi.add_argument('--autogating-pad', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=16,
help='Ignore the given length of whitened '
'strain at the ends of a segment, to '
'avoid filters ringing.')
# Optional
data_reading_group_multi.add_argument("--normalize-strain", type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:VALUE',
help="(optional) Divide frame data by constant.")
data_reading_group_multi.add_argument("--zpk-z", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of zeros for transfer function")
data_reading_group_multi.add_argument("--zpk-p", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of poles for transfer function")
data_reading_group_multi.add_argument("--zpk-k", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"Transfer function gain")
return data_reading_group_multi
ensure_one_opt_groups = []
ensure_one_opt_groups.append(['--frame-cache','--fake-strain',
'--fake-strain-from-file',
'--frame-files', '--frame-type',
'--hdf-store'])
required_opts_list = ['--gps-start-time', '--gps-end-time',
'--strain-high-pass', '--pad-data', '--sample-rate',
'--channel-name']
def verify_strain_options(opts, parser):
"""Sanity check provided strain arguments.
Parses the strain data CLI options and verifies that they are consistent
and reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (gps-start-time, gps-end-time, strain-high-pass,
pad-data, sample-rate, frame-cache, channel-name, fake-strain,
fake-strain-seed).
parser : object
OptionParser instance.
"""
for opt_group in ensure_one_opt_groups:
ensure_one_opt(opts, parser, opt_group)
required_opts(opts, parser, required_opts_list)
def verify_strain_options_multi_ifo(opts, parser, ifos):
"""Sanity check provided strain arguments.
Parses the strain data CLI options and verifies that they are consistent
and reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (gps-start-time, gps-end-time, strain-high-pass,
pad-data, sample-rate, frame-cache, channel-name, fake-strain,
fake-strain-seed).
parser : object
OptionParser instance.
ifos : list of strings
List of ifos for which to verify options for
"""
for ifo in ifos:
for opt_group in ensure_one_opt_groups:
ensure_one_opt_multi_ifo(opts, parser, ifo, opt_group)
required_opts_multi_ifo(opts, parser, ifo, required_opts_list)
def gate_data(data, gate_params):
"""Apply a set of gating windows to a time series.
Each gating window is
defined by a central time, a given duration (centered on the given
time) to zero out, and a given duration of smooth tapering on each side of
the window. The window function used for tapering is a Tukey window.
Parameters
----------
data : TimeSeries
The time series to be gated.
gate_params : list
List of parameters for the gating windows. Each element should be a
list or tuple with 3 elements: the central time of the gating window,
the half-duration of the portion to zero out, and the duration of the
Tukey tapering on each side. All times in seconds. The total duration
of the data affected by one gating window is thus twice the second
parameter plus twice the third parameter.
Returns
-------
data: TimeSeries
The gated time series.
"""
def inverted_tukey(M, n_pad):
midlen = M - 2*n_pad
if midlen < 0:
raise ValueError("No zeros left after applying padding.")
padarr = 0.5*(1.+numpy.cos(numpy.pi*numpy.arange(n_pad)/n_pad))
return numpy.concatenate((padarr,numpy.zeros(midlen),padarr[::-1]))
sample_rate = 1./data.delta_t
temp = data.data
for glitch_time, glitch_width, pad_width in gate_params:
t_start = glitch_time - glitch_width - pad_width - data.start_time
t_end = glitch_time + glitch_width + pad_width - data.start_time
if t_start > data.duration or t_end < 0.:
continue # Skip gate segments that don't overlap
win_samples = int(2*sample_rate*(glitch_width+pad_width))
pad_samples = int(sample_rate*pad_width)
window = inverted_tukey(win_samples, pad_samples)
offset = int(t_start * sample_rate)
idx1 = max(0, -offset)
idx2 = min(len(window), len(data)-offset)
temp[idx1+offset:idx2+offset] *= window[idx1:idx2]
return data
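# Example gate_params for gate_data() (illustrative numbers): zero out 1 s of
# data centered on GPS time 1126259461, with 0.5 s of tapering on each side:
#     gated = gate_data(strain, [(1126259461.0, 0.5, 0.5)])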
class StrainSegments(object):
""" Class for managing manipulation of strain data for the purpose of
matched filtering. This includes methods for segmenting and
conditioning.
"""
def __init__(self, strain, segment_length=None, segment_start_pad=0,
segment_end_pad=0, trigger_start=None, trigger_end=None,
filter_inj_only=False, injection_window=None,
allow_zero_padding=False):
""" Determine how to chop up the strain data into smaller segments
for analysis.
"""
self._fourier_segments = None
self.strain = strain
self.delta_t = strain.delta_t
self.sample_rate = strain.sample_rate
if segment_length:
seg_len = segment_length
else:
seg_len = strain.duration
self.delta_f = 1.0 / seg_len
self.time_len = int(seg_len * self.sample_rate)
self.freq_len = self.time_len // 2 + 1
seg_end_pad = segment_end_pad
seg_start_pad = segment_start_pad
if not trigger_start:
trigger_start = int(strain.start_time) + segment_start_pad
else:
if not allow_zero_padding:
min_start_time = int(strain.start_time) + segment_start_pad
else:
min_start_time = int(strain.start_time)
if trigger_start < min_start_time:
err_msg = "Trigger start time must be within analysable "
err_msg += "window. Asked to start from %d " %(trigger_start)
err_msg += "but can only analyse from %d." %(min_start_time)
raise ValueError(err_msg)
if not trigger_end:
trigger_end = int(strain.end_time) - segment_end_pad
else:
if not allow_zero_padding:
max_end_time = int(strain.end_time) - segment_end_pad
else:
max_end_time = int(strain.end_time)
if trigger_end > max_end_time:
err_msg = "Trigger end time must be within analysable "
err_msg += "window. Asked to end at %d " %(trigger_end)
err_msg += "but can only analyse to %d." %(max_end_time)
raise ValueError(err_msg)
throwaway_size = seg_start_pad + seg_end_pad
seg_width = seg_len - throwaway_size
# The amount of time we can actually analyze given the
# amount of padding that is needed
analyzable = trigger_end - trigger_start
data_start = (trigger_start - segment_start_pad) - \
int(strain.start_time)
data_end = trigger_end + segment_end_pad - int(strain.start_time)
data_dur = data_end - data_start
data_start = data_start * strain.sample_rate
data_end = data_end * strain.sample_rate
        # Number of segments needed to analyze this data
num_segs = int(numpy.ceil(float(analyzable) / float(seg_width)))
# The offset we will use between segments
seg_offset = int(numpy.ceil(analyzable / float(num_segs)))
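        # Worked example (illustrative numbers): 1024 s of strain with
        # seg_len = 256, seg_start_pad = 112 and seg_end_pad = 16 gives
        # seg_width = 128 and analyzable = 896, so num_segs = 7 and
        # seg_offset = 128.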
self.segment_slices = []
self.analyze_slices = []
# Determine how to chop up the strain into smaller segments
for nseg in range(num_segs-1):
# boundaries for time slices into the strain
seg_start = int(data_start + (nseg*seg_offset) * strain.sample_rate)
seg_end = int(seg_start + seg_len * strain.sample_rate)
seg_slice = slice(seg_start, seg_end)
self.segment_slices.append(seg_slice)
# boundaries for the analyzable portion of the segment
ana_start = int(seg_start_pad * strain.sample_rate)
ana_end = int(ana_start + seg_offset * strain.sample_rate)
ana_slice = slice(ana_start, ana_end)
self.analyze_slices.append(ana_slice)
# The last segment takes up any integer boundary slop
seg_end = int(data_end)
seg_start = int(seg_end - seg_len * strain.sample_rate)
seg_slice = slice(seg_start, seg_end)
self.segment_slices.append(seg_slice)
remaining = (data_dur - ((num_segs - 1) * seg_offset + seg_start_pad))
ana_start = int((seg_len - remaining) * strain.sample_rate)
ana_end = int((seg_len - seg_end_pad) * strain.sample_rate)
ana_slice = slice(ana_start, ana_end)
self.analyze_slices.append(ana_slice)
self.full_segment_slices = copy.deepcopy(self.segment_slices)
#Remove segments that are outside trig start and end
segment_slices_red = []
analyze_slices_red = []
trig_start_idx = (trigger_start - int(strain.start_time)) * strain.sample_rate
trig_end_idx = (trigger_end - int(strain.start_time)) * strain.sample_rate
if filter_inj_only and hasattr(strain, 'injections'):
end_times = strain.injections.end_times()
end_times = [time for time in end_times if float(time) < trigger_end and float(time) > trigger_start]
inj_idx = [(float(time) - float(strain.start_time)) * strain.sample_rate for time in end_times]
for seg, ana in zip(self.segment_slices, self.analyze_slices):
start = ana.start
stop = ana.stop
cum_start = start + seg.start
cum_end = stop + seg.start
# adjust first segment
if trig_start_idx > cum_start:
start += (trig_start_idx - cum_start)
# adjust last segment
if trig_end_idx < cum_end:
stop -= (cum_end - trig_end_idx)
if filter_inj_only and hasattr(strain, 'injections'):
analyze_this = False
inj_window = strain.sample_rate * 8
for inj_id in inj_idx:
if inj_id < (cum_end + inj_window) and \
inj_id > (cum_start - inj_window):
analyze_this = True
if not analyze_this:
continue
if start < stop:
segment_slices_red.append(seg)
analyze_slices_red.append(slice(start, stop))
self.segment_slices = segment_slices_red
self.analyze_slices = analyze_slices_red
def fourier_segments(self):
""" Return a list of the FFT'd segments.
Return the list of FrequencySeries. Additional properties are
added that describe the strain segment. The property 'analyze'
is a slice corresponding to the portion of the time domain equivalent
of the segment to analyze for triggers. The value 'cumulative_index'
indexes from the beginning of the original strain series.
"""
if not self._fourier_segments:
self._fourier_segments = []
for seg_slice, ana in zip(self.segment_slices, self.analyze_slices):
if seg_slice.start >= 0 and seg_slice.stop <= len(self.strain):
freq_seg = make_frequency_series(self.strain[seg_slice])
                # Assume that we never need to zero-pad on both sides at once
elif seg_slice.start < 0:
strain_chunk = self.strain[:seg_slice.stop]
strain_chunk.prepend_zeros(-seg_slice.start)
freq_seg = make_frequency_series(strain_chunk)
elif seg_slice.stop > len(self.strain):
strain_chunk = self.strain[seg_slice.start:]
strain_chunk.append_zeros(seg_slice.stop - len(self.strain))
freq_seg = make_frequency_series(strain_chunk)
freq_seg.analyze = ana
freq_seg.cumulative_index = seg_slice.start + ana.start
freq_seg.seg_slice = seg_slice
self._fourier_segments.append(freq_seg)
return self._fourier_segments
@classmethod
def from_cli(cls, opt, strain):
"""Calculate the segmentation of the strain data for analysis from
the command line options.
"""
return cls(strain, segment_length=opt.segment_length,
segment_start_pad=opt.segment_start_pad,
segment_end_pad=opt.segment_end_pad,
trigger_start=opt.trig_start_time,
trigger_end=opt.trig_end_time,
filter_inj_only=opt.filter_inj_only,
injection_window=opt.injection_window,
allow_zero_padding=opt.allow_zero_padding)
@classmethod
def insert_segment_option_group(cls, parser):
segment_group = parser.add_argument_group(
"Options for segmenting the strain",
"These options are used to determine how to "
"segment the strain into smaller chunks, "
"and for determining the portion of each to "
"analyze for triggers. ")
segment_group.add_argument("--trig-start-time", type=int, default=0,
help="(optional) The gps time to start recording triggers")
segment_group.add_argument("--trig-end-time", type=int, default=0,
help="(optional) The gps time to stop recording triggers")
segment_group.add_argument("--segment-length", type=int,
help="The length of each strain segment in seconds.")
segment_group.add_argument("--segment-start-pad", type=int,
help="The time in seconds to ignore of the "
"beginning of each segment in seconds. ")
segment_group.add_argument("--segment-end-pad", type=int,
help="The time in seconds to ignore at the "
"end of each segment in seconds.")
segment_group.add_argument("--allow-zero-padding", action='store_true',
help="Allow for zero padding of data to "
"analyze requested times, if needed.")
# Injection optimization options
segment_group.add_argument("--filter-inj-only", action='store_true',
help="Analyze only segments that contain an injection.")
segment_group.add_argument("--injection-window", default=None,
type=float, help="""If using --filter-inj-only then
only search for injections within +/- injection
                    window of the injection's end time. This is useful
to speed up a coherent search or a search where we
initially filter at lower sample rate, and then
filter at full rate where needed. NOTE: Reverts to
full analysis if two injections are in the same
segment.""")
@classmethod
def from_cli_single_ifo(cls, opt, strain, ifo):
"""Calculate the segmentation of the strain data for analysis from
the command line options.
"""
return cls(strain, segment_length=opt.segment_length[ifo],
segment_start_pad=opt.segment_start_pad[ifo],
segment_end_pad=opt.segment_end_pad[ifo],
trigger_start=opt.trig_start_time[ifo],
trigger_end=opt.trig_end_time[ifo],
filter_inj_only=opt.filter_inj_only,
allow_zero_padding=opt.allow_zero_padding)
@classmethod
def from_cli_multi_ifos(cls, opt, strain_dict, ifos):
"""Calculate the segmentation of the strain data for analysis from
the command line options.
"""
strain_segments = {}
for ifo in ifos:
strain_segments[ifo] = cls.from_cli_single_ifo(
opt, strain_dict[ifo], ifo)
return strain_segments
@classmethod
def insert_segment_option_group_multi_ifo(cls, parser):
segment_group = parser.add_argument_group(
"Options for segmenting the strain",
"These options are used to determine how to "
"segment the strain into smaller chunks, "
"and for determining the portion of each to "
"analyze for triggers. ")
segment_group.add_argument("--trig-start-time", type=int, default=0,
nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
help="(optional) The gps time to start recording triggers")
segment_group.add_argument("--trig-end-time", type=int, default=0,
nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
help="(optional) The gps time to stop recording triggers")
segment_group.add_argument("--segment-length", type=int,
nargs='+', action=MultiDetOptionAction,
metavar='IFO:LENGTH',
help="The length of each strain segment in seconds.")
segment_group.add_argument("--segment-start-pad", type=int,
nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
help="The time in seconds to ignore of the "
"beginning of each segment in seconds. ")
segment_group.add_argument("--segment-end-pad", type=int,
nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
help="The time in seconds to ignore at the "
"end of each segment in seconds.")
segment_group.add_argument("--allow-zero-padding", action='store_true',
help="Allow for zero padding of data to analyze "
"requested times, if needed.")
segment_group.add_argument("--filter-inj-only", action='store_true',
help="Analyze only segments that contain "
"an injection.")
required_opts_list = ['--segment-length',
'--segment-start-pad',
'--segment-end-pad',
]
@classmethod
def verify_segment_options(cls, opt, parser):
required_opts(opt, parser, cls.required_opts_list)
@classmethod
def verify_segment_options_multi_ifo(cls, opt, parser, ifos):
for ifo in ifos:
required_opts_multi_ifo(opt, parser, ifo, cls.required_opts_list)
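# A minimal sketch of how StrainSegments is typically used: chop synthetic
# strain into overlapping segments and FFT each one. The durations and
# padding values below are arbitrary example choices.
def _example_strain_segments():
    sample_rate = 4096
    strain = TimeSeries(numpy.random.normal(size=256 * sample_rate),
                        delta_t=1.0 / sample_rate, epoch=1000000000)
    segs = StrainSegments(strain, segment_length=64,
                          segment_start_pad=8, segment_end_pad=8)
    for fseg in segs.fourier_segments():
        # 'analyze' marks the trigger-generation portion of each segment
        print(fseg.cumulative_index, fseg.analyze)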
class StrainBuffer(pycbc.frame.DataBuffer):
def __init__(self, frame_src, channel_name, start_time,
max_buffer=512,
sample_rate=4096,
low_frequency_cutoff=20,
highpass_frequency=15.0,
highpass_reduction=200.0,
highpass_bandwidth=5.0,
psd_samples=30,
psd_segment_length=4,
psd_inverse_length=3.5,
trim_padding=0.25,
autogating_threshold=None,
autogating_cluster=None,
autogating_pad=None,
autogating_width=None,
autogating_taper=None,
state_channel=None,
data_quality_channel=None,
dyn_range_fac=pycbc.DYN_RANGE_FAC,
psd_abort_difference=None,
psd_recalculate_difference=None,
force_update_cache=True,
increment_update_cache=None,
analyze_flags=None,
data_quality_flags=None,
dq_padding=0):
""" Class to produce overwhitened strain incrementally
Parameters
----------
        frame_src: str or list of strings
            Where to read frame files from. This can be a list of frame
            files, a glob, etc.
channel_name: str
Name of the channel to read from the frame files
start_time:
Time to start reading from.
max_buffer: {int, 512}, Optional
Length of the buffer in seconds
        sample_rate: {int, 4096}, Optional
Rate in Hz to sample the data.
low_frequency_cutoff: {float, 20}, Optional
The low frequency cutoff to use for inverse spectrum truncation
highpass_frequency: {float, 15}, Optional
The frequency to apply a highpass filter at before downsampling.
highpass_reduction: {float, 200}, Optional
The amount of reduction to apply to the low frequencies.
highpass_bandwidth: {float, 5}, Optional
The width of the transition region for the highpass filter.
psd_samples: {int, 30}, Optional
            The number of samples to use for PSD estimation
psd_segment_length: {float, 4}, Optional
The number of seconds in each psd sample.
psd_inverse_length: {float, 3.5}, Optional
The length in seconds for fourier transform of the inverse of the
PSD to be truncated to.
trim_padding: {float, 0.25}, Optional
            Amount of padding in seconds to allow for truncating the
            overwhitened data stream.
autogating_threshold: float, Optional
Sigma deviation required to cause autogating of data.
If None, no autogating is performed.
autogating_cluster: float, Optional
Seconds to cluster possible gating locations.
autogating_pad: float, Optional
Seconds of corrupted whitened strain to ignore when generating a gate.
autogating_width: float, Optional
Half-duration of the zeroed-out portion of autogates.
autogating_taper: float, Optional
Duration of taper on either side of the gating window in seconds.
state_channel: {str, None}, Optional
Channel to use for state information about the strain
data_quality_channel: {str, None}, Optional
Channel to use for data quality information about the strain
dyn_range_fac: {float, pycbc.DYN_RANGE_FAC}, Optional
Scale factor to apply to strain
psd_abort_difference: {float, None}, Optional
The relative change in the inspiral range from the previous PSD
estimate to trigger the data to be considered invalid.
psd_recalculate_difference: {float, None}, Optional
            The relative change in the inspiral range from the previous PSD
            to trigger a re-estimation of the PSD.
force_update_cache: {boolean, True}, Optional
Re-check the filesystem for frame files on every attempt to
read more data.
analyze_flags: list of strs
The flags that must be on to mark the current data as valid for
*any* use.
data_quality_flags: list of strs
            The flags used to determine whether to keep triggers.
dq_padding: {float, 0}, optional
Extra seconds to consider invalid before/after times with bad DQ.
increment_update_cache: {str, None}, Optional
            Pattern to look for frame files in a GPS dependent directory. This
            is an alternative to the forced update of the frame cache, and
            attempts to predict the next frame file name without probing the
            filesystem.
"""
super(StrainBuffer, self).__init__(frame_src, channel_name, start_time,
max_buffer=32,
force_update_cache=force_update_cache,
increment_update_cache=increment_update_cache)
self.low_frequency_cutoff = low_frequency_cutoff
# Set up status buffers
self.analyze_flags = analyze_flags
self.data_quality_flags = data_quality_flags
self.state = None
self.dq = None
self.dq_padding = dq_padding
# State channel
if state_channel is not None:
valid_mask = pycbc.frame.flag_names_to_bitmask(self.analyze_flags)
logging.info('State channel %s interpreted as bitmask %s = good',
state_channel, bin(valid_mask))
self.state = pycbc.frame.StatusBuffer(
frame_src,
state_channel, start_time,
max_buffer=max_buffer,
valid_mask=valid_mask,
force_update_cache=force_update_cache,
increment_update_cache=increment_update_cache)
# low latency dq channel
if data_quality_channel is not None:
sb_kwargs = dict(max_buffer=max_buffer,
force_update_cache=force_update_cache,
increment_update_cache=increment_update_cache)
if len(self.data_quality_flags) == 1 \
and self.data_quality_flags[0] == 'veto_nonzero':
sb_kwargs['valid_on_zero'] = True
logging.info('DQ channel %s interpreted as zero = good',
data_quality_channel)
            else:
                valid_mask = pycbc.frame.flag_names_to_bitmask(
                    self.data_quality_flags)
                sb_kwargs['valid_mask'] = valid_mask
                logging.info('DQ channel %s interpreted as bitmask %s = good',
                             data_quality_channel, bin(valid_mask))
self.dq = pycbc.frame.StatusBuffer(frame_src, data_quality_channel,
start_time, **sb_kwargs)
self.highpass_frequency = highpass_frequency
self.highpass_reduction = highpass_reduction
self.highpass_bandwidth = highpass_bandwidth
self.autogating_threshold = autogating_threshold
self.autogating_cluster = autogating_cluster
self.autogating_pad = autogating_pad
self.autogating_width = autogating_width
self.autogating_taper = autogating_taper
self.gate_params = []
self.sample_rate = sample_rate
self.dyn_range_fac = dyn_range_fac
self.psd_abort_difference = psd_abort_difference
self.psd_recalculate_difference = psd_recalculate_difference
self.psd_segment_length = psd_segment_length
self.psd_samples = psd_samples
self.psd_inverse_length = psd_inverse_length
self.psd = None
self.psds = {}
strain_len = int(max_buffer * self.sample_rate)
self.strain = TimeSeries(zeros(strain_len, dtype=numpy.float32),
delta_t=1.0/self.sample_rate,
epoch=start_time-max_buffer)
# Determine the total number of corrupted samples for highpass
# and PSD over whitening
highpass_samples, self.beta = kaiserord(self.highpass_reduction,
self.highpass_bandwidth / self.raw_buffer.sample_rate * 2 * numpy.pi)
self.highpass_samples = int(highpass_samples / 2)
resample_corruption = 10 # If using the ldas method
self.factor = int(1.0 / self.raw_buffer.delta_t / self.sample_rate)
self.corruption = self.highpass_samples // self.factor + resample_corruption
self.psd_corruption = self.psd_inverse_length * self.sample_rate
self.total_corruption = self.corruption + self.psd_corruption
# Determine how much padding is needed after removing the parts
# associated with PSD over whitening and highpass filtering
self.trim_padding = int(trim_padding * self.sample_rate)
if self.trim_padding > self.total_corruption:
self.trim_padding = self.total_corruption
self.psd_duration = (psd_samples - 1) // 2 * psd_segment_length
self.reduced_pad = int(self.total_corruption - self.trim_padding)
self.segments = {}
# time to ignore output of frame (for initial buffering)
self.add_hard_count()
self.taper_immediate_strain = True
@property
def start_time(self):
""" Return the start time of the current valid segment of data """
return self.end_time - self.blocksize
@property
def end_time(self):
""" Return the end time of the current valid segment of data """
return float(self.strain.start_time + (len(self.strain) - self.total_corruption) / self.sample_rate)
def add_hard_count(self):
""" Reset the countdown timer, so that we don't analyze data long enough
to generate a new PSD.
"""
self.wait_duration = int(numpy.ceil(self.total_corruption / self.sample_rate + self.psd_duration))
self.invalidate_psd()
def invalidate_psd(self):
""" Make the current PSD invalid. A new one will be generated when
it is next required """
self.psd = None
self.psds = {}
def recalculate_psd(self):
""" Recalculate the psd
"""
seg_len = int(self.sample_rate * self.psd_segment_length)
e = len(self.strain)
s = e - (self.psd_samples + 1) * seg_len // 2
psd = pycbc.psd.welch(self.strain[s:e], seg_len=seg_len, seg_stride=seg_len//2)
psd.dist = spa_distance(psd, 1.4, 1.4, self.low_frequency_cutoff) * pycbc.DYN_RANGE_FAC
# If the new psd is similar to the old one, don't replace it
if self.psd and self.psd_recalculate_difference:
if abs(self.psd.dist - psd.dist) / self.psd.dist < self.psd_recalculate_difference:
logging.info("Skipping recalculation of %s PSD, %s-%s",
self.detector, self.psd.dist, psd.dist)
return True
# If the new psd is *really* different than the old one, return an error
if self.psd and self.psd_abort_difference:
if abs(self.psd.dist - psd.dist) / self.psd.dist > self.psd_abort_difference:
logging.info("%s PSD is CRAZY, aborting!!!!, %s-%s",
self.detector, self.psd.dist, psd.dist)
self.psd = psd
self.psds = {}
return False
        # If the new estimate replaces the current one, invalidate the interpolated PSDs
self.psd = psd
self.psds = {}
logging.info("Recalculating %s PSD, %s", self.detector, psd.dist)
return True
def overwhitened_data(self, delta_f):
""" Return overwhitened data
Parameters
----------
        delta_f: float
            The frequency resolution (Hz) of the overwhitened
            frequency-domain data to generate
        Returns
        -------
        htilde: FrequencySeries
            Overwhitened strain data
"""
# we haven't already computed htilde for this delta_f
if delta_f not in self.segments:
buffer_length = int(1.0 / delta_f)
e = len(self.strain)
s = int(e - buffer_length * self.sample_rate - self.reduced_pad * 2)
fseries = make_frequency_series(self.strain[s:e])
# we haven't calculated a resample psd for this delta_f
if delta_f not in self.psds:
psdt = pycbc.psd.interpolate(self.psd, fseries.delta_f)
psdt = pycbc.psd.inverse_spectrum_truncation(psdt,
int(self.sample_rate * self.psd_inverse_length),
low_frequency_cutoff=self.low_frequency_cutoff)
psdt._delta_f = fseries.delta_f
psd = pycbc.psd.interpolate(self.psd, delta_f)
psd = pycbc.psd.inverse_spectrum_truncation(psd,
int(self.sample_rate * self.psd_inverse_length),
low_frequency_cutoff=self.low_frequency_cutoff)
psd.psdt = psdt
self.psds[delta_f] = psd
psd = self.psds[delta_f]
fseries /= psd.psdt
# trim ends of strain
if self.reduced_pad != 0:
overwhite = TimeSeries(zeros(e-s, dtype=self.strain.dtype),
delta_t=self.strain.delta_t)
pycbc.fft.ifft(fseries, overwhite)
overwhite2 = overwhite[self.reduced_pad:len(overwhite)-self.reduced_pad]
taper_window = self.trim_padding / 2.0 / overwhite.sample_rate
gate_params = [(overwhite2.start_time, 0., taper_window),
(overwhite2.end_time, 0., taper_window)]
gate_data(overwhite2, gate_params)
fseries_trimmed = FrequencySeries(zeros(len(overwhite2) // 2 + 1,
dtype=fseries.dtype), delta_f=delta_f)
pycbc.fft.fft(overwhite2, fseries_trimmed)
fseries_trimmed.start_time = fseries.start_time + self.reduced_pad * self.strain.delta_t
else:
fseries_trimmed = fseries
fseries_trimmed.psd = psd
self.segments[delta_f] = fseries_trimmed
stilde = self.segments[delta_f]
return stilde
def near_hwinj(self):
"""Check that the current set of triggers could be influenced by
a hardware injection.
"""
if not self.state:
return False
if not self.state.is_extent_valid(self.start_time, self.blocksize, pycbc.frame.NO_HWINJ):
return True
return False
def null_advance_strain(self, blocksize):
""" Advance and insert zeros
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
"""
sample_step = int(blocksize * self.sample_rate)
csize = sample_step + self.corruption * 2
self.strain.roll(-sample_step)
# We should roll this off at some point too...
self.strain[len(self.strain) - csize + self.corruption:] = 0
self.strain.start_time += blocksize
# The next time we need strain will need to be tapered
self.taper_immediate_strain = True
def advance(self, blocksize, timeout=10):
"""Advanced buffer blocksize seconds.
Add blocksize seconds more to the buffer, push blocksize seconds
from the beginning.
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
Returns
-------
status: boolean
Returns True if this block is analyzable.
"""
ts = super(StrainBuffer, self).attempt_advance(blocksize, timeout=timeout)
self.blocksize = blocksize
self.gate_params = []
# We have given up so there is no time series
if ts is None:
logging.info("%s frame is late, giving up", self.detector)
self.null_advance_strain(blocksize)
if self.state:
self.state.null_advance(blocksize)
if self.dq:
self.dq.null_advance(blocksize)
return False
# We collected some data so we are closer to being able to analyze data
self.wait_duration -= blocksize
# If the data we got was invalid, reset the counter on how much to collect
# This behavior corresponds to how we handle CAT1 vetoes
if self.state and self.state.advance(blocksize) is False:
self.add_hard_count()
self.null_advance_strain(blocksize)
if self.dq:
self.dq.null_advance(blocksize)
logging.info("%s time has invalid data, resetting buffer",
self.detector)
return False
# Also advance the dq vector in lockstep
if self.dq:
self.dq.advance(blocksize)
self.segments = {}
# only condition with the needed raw data so we can continuously add
# to the existing result
# Precondition
sample_step = int(blocksize * self.sample_rate)
csize = sample_step + self.corruption * 2
start = len(self.raw_buffer) - csize * self.factor
strain = self.raw_buffer[start:]
strain = pycbc.filter.highpass_fir(strain, self.highpass_frequency,
self.highpass_samples,
beta=self.beta)
strain = (strain * self.dyn_range_fac).astype(numpy.float32)
strain = pycbc.filter.resample_to_delta_t(strain,
1.0/self.sample_rate, method='ldas')
# remove corruption at beginning
strain = strain[self.corruption:]
# taper beginning if needed
if self.taper_immediate_strain:
logging.info("Tapering start of %s strain block", self.detector)
strain = gate_data(
strain, [(strain.start_time, 0., self.autogating_taper)])
self.taper_immediate_strain = False
# Stitch into continuous stream
self.strain.roll(-sample_step)
self.strain[len(self.strain) - csize + self.corruption:] = strain[:]
self.strain.start_time += blocksize
# apply gating if needed
if self.autogating_threshold is not None:
glitch_times = detect_loud_glitches(
strain[:-self.corruption],
psd_duration=2., psd_stride=1.,
threshold=self.autogating_threshold,
cluster_window=self.autogating_cluster,
low_freq_cutoff=self.highpass_frequency,
corrupt_time=self.autogating_pad)
if len(glitch_times) > 0:
logging.info('Autogating %s at %s', self.detector,
', '.join(['%.3f' % gt for gt in glitch_times]))
self.gate_params = \
[(gt, self.autogating_width, self.autogating_taper)
for gt in glitch_times]
self.strain = gate_data(self.strain, self.gate_params)
        if self.psd is None and self.wait_duration <= 0:
self.recalculate_psd()
return self.wait_duration <= 0
@classmethod
def from_cli(cls, ifo, args, maxlen):
"""Initialize a StrainBuffer object (data reader) for a particular
detector.
"""
state_channel = analyze_flags = None
if args.state_channel and ifo in args.state_channel \
and args.analyze_flags and ifo in args.analyze_flags:
state_channel = ':'.join([ifo, args.state_channel[ifo]])
analyze_flags = args.analyze_flags[ifo].split(',')
dq_channel = dq_flags = None
if args.data_quality_channel and ifo in args.data_quality_channel \
and args.data_quality_flags and ifo in args.data_quality_flags:
dq_channel = ':'.join([ifo, args.data_quality_channel[ifo]])
dq_flags = args.data_quality_flags[ifo].split(',')
if args.frame_type:
frame_src = pycbc.frame.frame_paths(args.frame_type[ifo],
args.start_time,
args.end_time)
else:
frame_src = [args.frame_src[ifo]]
strain_channel = ':'.join([ifo, args.channel_name[ifo]])
return cls(frame_src, strain_channel,
args.start_time, max_buffer=maxlen * 2,
state_channel=state_channel,
data_quality_channel=dq_channel,
sample_rate=args.sample_rate,
low_frequency_cutoff=args.low_frequency_cutoff,
highpass_frequency=args.highpass_frequency,
highpass_reduction=args.highpass_reduction,
highpass_bandwidth=args.highpass_bandwidth,
psd_samples=args.psd_samples,
trim_padding=args.trim_padding,
psd_segment_length=args.psd_segment_length,
psd_inverse_length=args.psd_inverse_length,
autogating_threshold=args.autogating_threshold,
autogating_cluster=args.autogating_cluster,
autogating_pad=args.autogating_pad,
autogating_width=args.autogating_width,
autogating_taper=args.autogating_taper,
psd_abort_difference=args.psd_abort_difference,
psd_recalculate_difference=args.psd_recalculate_difference,
force_update_cache=args.force_update_cache,
increment_update_cache=args.increment_update_cache[ifo],
analyze_flags=analyze_flags,
data_quality_flags=dq_flags,
dq_padding=args.data_quality_padding)
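# A sketch of the intended low-latency loop over a StrainBuffer; 'buf' is
# assumed to have been built with StrainBuffer.from_cli above, and the
# one-second block size and 1/16 Hz frequency resolution are example values.
def _example_strainbuffer_loop(buf, nblocks=16):
    results = []
    for _ in range(nblocks):
        if buf.advance(1):
            # Block is analyzable: fetch overwhitened data for filtering
            results.append(buf.overwhitened_data(1.0 / 16))
    return results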
|
ahnitz/pycbc
|
pycbc/strain/strain.py
|
Python
|
gpl-3.0
| 80,053
|
[
"Gaussian"
] |
bb2ba3c88d4ffc0a2b04383dd98c8163265af3718f6b13b3210c386ce2c23b1d
|
"""
desispec.bootcalib
==================
Utility functions to perform a quick calibration of DESI data
TODO:
1. Expand to r, i cameras
2. QA plots
3. Test with CR data
"""
from __future__ import print_function, absolute_import, division
import numpy as np
import copy
import pdb
import yaml
import glob
import math
import time
import os
import sys
import argparse
import locale
from pkg_resources import resource_exists, resource_filename
from astropy.modeling import models, fitting
from astropy.stats import sigma_clip
from astropy.table import Table, Column, vstack
from astropy.io import fits
#- support astropy 2.x sigma_clip syntax with `iters` instead of `maxiters`
import astropy
if astropy.__version__.startswith('2.'):
astropy_sigma_clip = sigma_clip
def sigma_clip(data, sigma=None, maxiters=5):
return astropy_sigma_clip(data, sigma=sigma, iters=maxiters)
from desispec.util import set_backend
set_backend()
from matplotlib import pyplot as plt
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.cm as cm
from matplotlib.backends.backend_pdf import PdfPages
from desiutil.log import get_logger
from desiutil import funcfits as dufits
from numpy.polynomial.legendre import legval
glbl_figsz = (16,9)
########################################################
# High level wrapper
# TODO: This was ported from the original bin/desi_bootcalib so that it could
# be called independently by quicklook, but it needs to be coordinated with
# desispec.scripts.bootcalib.main()
########################################################
def bootcalib(deg,flatimage,arcimage):
"""
Args:
deg: Legendre polynomial degree to use to fit
flatimage: desispec.image.Image object of flatfield
arcimage: desispec.image.Image object of arc
Mostly inherited from desispec/bin/desi_bootcalib directly as needed
Returns:
xfit, fdicts, gauss, all_wave_soln
TODO: document what those return objects are
"""
camera=flatimage.camera
flat=flatimage.pix
flat[flat<-20]=-20.
ny=flat.shape[0]
xpk,ypos,cut=find_fiber_peaks(flat)
xset,xerr=trace_crude_init(flat,xpk,ypos)
xfit,fdicts=fit_traces(xset,xerr)
gauss=fiber_gauss(flat,xfit,xerr)
#- Also need wavelength solution not just trace
arc=arcimage.pix
arc[arc<-20]=-20.
arc_ivar=arcimage.ivar*(arcimage.mask==0)
all_spec=extract_sngfibers_gaussianpsf(arc,arc_ivar,xfit,gauss)
llist=load_arcline_list(camera)
### dlamb,wmark,gd_lines,line_guess=load_gdarc_lines(camera)
dlamb, gd_lines = load_gdarc_lines(camera, llist)
#- Solve for wavelengths
all_wv_soln=[]
all_dlamb=[]
for ii in range(all_spec.shape[1]):
spec=all_spec[:,ii]
        pixpk,flux=find_arc_lines(spec)
        id_dict={'pixpk':pixpk,'flux':flux}
        id_dict['fiber']=ii
        #- Match detected lines against the good-line list; the greater
        #- curvature of the z-camera solution is absorbed by the triplet
        #- matcher and the final polynomial fits below
        id_arc_lines_using_triplets(id_dict,gd_lines,dlamb)
        #- Now identify the remaining detected lines against the full list
        id_remainder(id_dict,llist)
#- Final fit wave vs. pix too
final_fit, mask = dufits.iter_fit(np.array(id_dict['id_wave']), np.array(id_dict['id_pix']), 'polynomial', 3, xmin=0., xmax=1.)
rms = np.sqrt(np.mean((dufits.func_val(np.array(id_dict['id_wave'])[mask==0],final_fit)-np.array(id_dict['id_pix'])[mask==0])**2))
final_fit_pix,mask2 = dufits.iter_fit(np.array(id_dict['id_pix']), np.array(id_dict['id_wave']),'legendre',deg, niter=5)
id_dict['final_fit'] = final_fit
id_dict['rms'] = rms
id_dict['final_fit_pix'] = final_fit_pix
id_dict['wave_min'] = dufits.func_val(0,final_fit_pix)
id_dict['wave_max'] = dufits.func_val(ny-1,final_fit_pix)
id_dict['mask'] = mask
all_wv_soln.append(id_dict)
    return xfit, fdicts, gauss, all_wv_soln
########################################################
# Arc/Wavelength Routines (Linelists come next)
########################################################
def find_arc_lines(spec,rms_thresh=7.,nwidth=5):
"""Find and centroid arc lines in an input spectrum
Parameters
----------
spec : ndarray
Arc line spectrum
rms_thresh : float
RMS threshold scale
    nwidth : int
        Line width to test over
    Returns
    -------
    xpk : ndarray
        Flux-weighted centroids of the detected lines (pixels)
    flux : ndarray
        Summed flux of each detected line
    """
# Threshold criterion
npix = spec.size
spec_mask = sigma_clip(spec, sigma=4., maxiters=5)
rms = np.std(spec_mask)
thresh = rms*rms_thresh
#print("thresh = {:g}".format(thresh))
gdp = spec > thresh
# Avoid edges
gdp = gdp & (np.arange(npix) > 2.*nwidth) & (np.arange(npix) < (npix-2.*nwidth))
# Roll to find peaks (simple algorithm)
# nwidth = 5
nstep = max(1,nwidth // 2)
for kk in range(-nstep,nstep):
if kk < 0:
test = np.roll(spec,kk) < np.roll(spec,kk+1)
else:
test = np.roll(spec,kk) > np.roll(spec,kk+1)
# Compare
gdp = gdp & test
# Center
gdpix = np.where(gdp)[0]
ngd = gdpix.size
xpk = np.zeros(ngd)
flux = np.zeros(ngd)
for jj,igdpix in enumerate(gdpix):
# Simple flux-weight
pix = np.arange(igdpix-nstep,igdpix+nstep+1,dtype=int)
flux[jj] = np.sum(spec[pix])
xpk[jj] = np.sum(pix*spec[pix]) / flux[jj]
# Finish
    return xpk, flux
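# A small sketch of find_arc_lines on a synthetic arc spectrum: two
# Gaussian "lines" on a noisy background. The centers (120.0, 300.5) and
# amplitudes are arbitrary example values.
def _example_find_arc_lines():
    pix = np.arange(512)
    spec = np.random.normal(0., 1., 512)
    for center in (120.0, 300.5):
        spec += 100. * np.exp(-0.5 * ((pix - center) / 1.5)**2)
    xpk, flux = find_arc_lines(spec)
    return xpk, flux  # flux-weighted centroids near 120.0 and 300.5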
def remove_duplicates_w_id(wy,w,y_id,w_id) :
# might be several identical w_id
y_id=np.array(y_id).astype(int)
w_id=np.array(w_id).astype(int)
y_id2=[]
w_id2=[]
for j in np.unique(w_id) :
w_id2.append(j)
ii=y_id[w_id==j]
if ii.size==1 :
y_id2.append(ii[0])
else :
i=np.argmin(np.abs(wy[ii]-w[j]))
y_id2.append(ii[i])
y_id2=np.array(y_id2).astype(int)
w_id2=np.array(w_id2).astype(int)
tmp=np.argsort(w[w_id2])
y_id2=y_id2[tmp]
w_id2=w_id2[tmp]
return y_id2,w_id2
def remove_duplicates_y_id(yw,y,y_id,w_id) :
# might be several identical y_id
w_id=np.array(w_id).astype(int)
y_id=np.array(y_id).astype(int)
w_id2=[]
y_id2=[]
for j in np.unique(y_id) :
y_id2.append(j)
ii=w_id[y_id==j]
if ii.size==1 :
w_id2.append(ii[0])
else :
i=np.argmin(np.abs(yw[ii]-y[j]))
w_id2.append(ii[i])
w_id2=np.array(w_id2).astype(int)
y_id2=np.array(y_id2).astype(int)
tmp=np.argsort(y[y_id2])
w_id2=w_id2[tmp]
y_id2=y_id2[tmp]
return y_id2,w_id2
def refine_solution(y,w,y_id,w_id,deg=3,tolerance=5.) :
log = get_logger()
# remove duplicates
transfo=np.poly1d(np.polyfit(y[y_id],w[w_id],deg=deg))
wy=transfo(y)
y_id,w_id=remove_duplicates_w_id(wy,w,y_id,w_id)
transfo=np.poly1d(np.polyfit(w[w_id],y[y_id],deg=deg))
yw=transfo(w)
y_id,w_id=remove_duplicates_y_id(yw,y,y_id,w_id)
if len(y_id) != len(np.unique(y_id)) :
log.error("duplicate AT INIT y_id={:s}".format(str(y_id)))
if len(w_id) != len(np.unique(w_id)) :
log.error("duplicate AT INIT w_id={:s}".format(str(w_id)))
nmatch=len(y_id)
#log.info("init nmatch=%d rms=%f wave=%s"%(nmatch,np.std(wy[y_id]-w[w_id]),w[w_id]))
#log.info("init nmatch=%d rms=%f"%(nmatch,np.std(wy[y_id]-w[w_id])))
if nmatch<deg+1 :
log.error("error : init nmatch too small")
return y_id,w_id,1000.,0
rms=0.
# loop on fit of transfo, pairing, cleaning
for loop in range(200) :
# compute transfo
transfo=np.poly1d(np.polyfit(y[y_id],w[w_id],deg=deg))
# apply transfo to measurements
wy=transfo(y)
previous_rms = rms+0.
rms=np.std(wy[y_id]-w[w_id])
# match lines
        mdiff0=min(tolerance,max(2.,rms*2.)) # difficult parameter to tune: either lose lever arm or get false matches !!
        mdiff1=tolerance # same tuning trade-off as mdiff0
unmatched_indices=np.setdiff1d(np.arange(y.size),y_id)
for i,wi in zip(unmatched_indices,wy[unmatched_indices]) :
dist=np.abs(wi-w)
jj=np.argsort(dist)
            for o,j in enumerate(jj) : # o = rank in sorted order, j = index into w
if j in w_id :
continue
if dist[j]<mdiff0 or ( o<jj.size-1 and dist[j]<mdiff1 and dist[j]<0.3*dist[jj[o+1]]) :
y_id=np.append(y_id,i)
w_id=np.append(w_id,j)
break
previous_nmatch = nmatch+0
nmatch=len(y_id)
#log.info("iter #%d nmatch=%d rms=%f"%(loop,nmatch,rms))
if nmatch < deg+1 :
log.error("error init nmatch too small")
y_id=[]
w_id=[]
rms=100000.
return y_id,w_id,rms,loop
if nmatch==previous_nmatch and abs(rms-previous_rms)<0.01 and loop>=1 :
break
if nmatch>=min(w.size,y.size) :
#print("break because %d>=min(%d,%d)"%(nmatch,w.size,y.size))
break
return y_id,w_id,rms,loop
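# A sketch of refine_solution growing a partial match: detected centroids y
# follow an exactly linear relation w = 3600 + 0.6*y, and five seed pairs
# are expanded to the full set. All values are made-up examples.
def _example_refine_solution():
    y = np.linspace(100., 3900., 20)      # detected line centroids (pix)
    w = 3600. + 0.6 * y                   # matching known wavelengths (Ang)
    seeds = [0, 5, 10, 15, 19]
    y_id, w_id, rms, niter = refine_solution(y, w, seeds, seeds)
    return len(y_id), rms                 # expect 20 matches, rms ~ 0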
def id_remainder(id_dict, llist, deg=4, tolerance=1., verbose=False) :
log = get_logger()
y_id=np.array(id_dict['id_idx']).astype(int)
all_y=np.array(id_dict['pixpk'])
all_known_waves = np.sort(np.array(llist["wave"]))
identified_waves = np.array(id_dict["id_wave"]) # lines identified at previous steps
w_id=[]
for w in identified_waves :
i=np.argmin(np.abs(all_known_waves-w))
diff=np.abs(all_known_waves[i]-w)
if diff>0.1 :
log.warning("discrepant wavelength".format(w,all_known_waves[i]))
w_id.append(i)
w_id = np.array(w_id).astype(int)
y_id,w_id,rms,niter=refine_solution(all_y,all_known_waves,y_id,w_id,deg=deg,tolerance=tolerance)
id_dict['id_idx'] = np.sort(y_id)
id_dict['id_pix'] = np.sort(all_y[y_id])
id_dict['id_wave'] = np.sort(all_known_waves[w_id])
id_dict['rms'] = rms
log.info("{:d} matched for {:d} detected and {:d} known, rms = {:g}".format(len(y_id),len(all_y),len(all_known_waves),rms))
def compute_triplets(wave) :
triplets=[]
wave=np.sort(wave)
for i1,w1 in enumerate(wave[:-1]) :
for i2,w2 in enumerate(wave[i1+1:]) :
for i3,w3 in enumerate(wave[i1+i2+2:]) :
triplet=[w1,w2,w3,i1,i1+1+i2,i1+i2+2+i3,w2-w1,w3-w1,w2**2-w1**2,w3**2-w1**2]
#print(triplet)
#print(wave[i1],wave[i1+1+i2],wave[i1+i2+2+i3])
triplets.append(triplet)
return np.array(triplets)
def id_arc_lines_using_triplets(id_dict,w,dwdy_prior,d2wdy2_prior=1.5e-5,toler=0.2,ntrack=50,nmax=40):
"""Match (as best possible), a set of the input list of expected arc lines to the detected list
Parameters
----------
id_dict : dictionnary with Pixel locations of detected arc lines in "pixpk" and fluxes in "flux"
w : ndarray
array of expected arc lines to be detected and identified
dwdy : float
Average dispersion in the spectrum
d2wdy2_prior : float
Prior on second derivative
toler : float, optional
Tolerance for matching (20%)
ntrack : max. number of solutions to be tracked
Returns
-------
id_dict : dict
dict of identified lines
"""
log=get_logger()
#log.info("y=%s"%str(y))
#log.info("w=%s"%str(w))
y = id_dict["pixpk"]
log.info("ny=%d nw=%d"%(len(y),len(w)))
if nmax<10 :
nmax=10
log.warning("force nmax=10 (arg was too small: {:d})".format(nmax))
if len(y)>nmax :
# log.info("down-selecting the number of detected lines from {:d} to {:d}".format(len(y),nmax))
# keep at least the edges
margin=3
new_y=np.append(y[:margin],y[-margin:])
# now look at the flux to select the other ones
flux=id_dict["flux"][margin:-margin]
ii=np.argsort(flux)
new_y=np.append(new_y,y[margin:-margin][ii[-(nmax-2*margin):]])
y = np.sort(new_y)
# compute triplets of waves of y positions
y_triplets = compute_triplets(y)
w_triplets = compute_triplets(w)
# each pair of triplet defines a 2nd order polynomial (chosen centered on y=2000)
# w = a*(y-2000)**2+b*(y-2000)+c
# w = a*y**2-4000*a*y+b*y+cst
# w = a*(y**2-4000*y)+b*y+cst
# dw_12 = a*(dy2_12-4000*dy_12)+b*dy_12
# dw_13 = a*(dy2_13-4000*dy_13)+b*dy_12
# dw_12 = a*cdy2_12+b*dy_12
# dw_13 = a*cdy2_13+b*dy_13
# with cdy2_12=dy2_12-4000*dy_12
# and cdy2_13=dy2_13-4000*dy_13
# idet = 1./(dy_13*cdy2_12-dy_12*cdy2_13)
# a = idet*(dy_13*dw_12-dy_12*dw_13)
# b = idet*(-cdy2_13*dw_12+cdy2_12*dw_13)
#triplet=[w1,w2,w3,i1,i1+1+i2,i1+i2+2+i3,w2-w1,w3-w1,w2**2-w1**2,w3**2-w1**2]
dy_12=y_triplets[:,6]
dy_13=y_triplets[:,7]
#dy2_12=y_triplets[:,8]
#dy2_13=y_triplets[:,9]
# centered version
cdy2_12=y_triplets[:,8]-4000.*y_triplets[:,6]
cdy2_13=y_triplets[:,9]-4000.*y_triplets[:,7]
idet=1./(dy_13*cdy2_12-dy_12*cdy2_13)
# fill histogram with polynomial coefs and first index of each triplet in the pair for all pairs of triplets(y,w)
# create the 4D histogram
ndwdy = 41
nd2wdy2 = 21
dwdy_min = dwdy_prior*(1-toler)
dwdy_max = dwdy_prior*(1+toler)
dwdy_step = (dwdy_max-dwdy_min)/ndwdy
d2wdy2_min = -d2wdy2_prior
d2wdy2_max = +d2wdy2_prior
d2wdy2_step = (d2wdy2_max-d2wdy2_min)/nd2wdy2
histogram = np.zeros((ndwdy,nd2wdy2,len(y),len(w))) # definition of the histogram
# fill the histogram
for w_triplet in w_triplets :
#d2wdy2 = idet*(dy_13*w_triplet[6]-dy_12*w_triplet[7])
#dwdy = idet*(-cdy2_13*w_triplet[6]+cdy2_12*w_triplet[7])
# bins in the histogram
dwdy_bin = ((idet*(-cdy2_13*w_triplet[6]+cdy2_12*w_triplet[7])-dwdy_min)/dwdy_step).astype(int)
d2wdy2_bin = ((idet*(dy_13*w_triplet[6]-dy_12*w_triplet[7])-d2wdy2_min)/d2wdy2_step).astype(int)
pairs_in_histo=np.where((dwdy_bin>=0)&(dwdy_bin<ndwdy)&(d2wdy2_bin>=0)&(d2wdy2_bin<nd2wdy2))[0]
# fill histo
iw=int(w_triplet[3])
for a,b,c in zip(dwdy_bin[pairs_in_histo],d2wdy2_bin[pairs_in_histo],y_triplets[pairs_in_histo,3].astype(int)) :
histogram[a,b,c,iw] += 1
# find max bins in the histo
histogram_ravel = histogram.ravel()
best_histo_bins = histogram_ravel.argsort()[::-1]
#log.info("nmatch in first bins=%s"%histogram.ravel()[best_histo_bins[:3]])
best_y_id=[]
best_w_id=[]
best_rms=1000.
# loop on best matches ( = most populated bins)
count=0
for histo_bin in best_histo_bins[:ntrack] :
if histogram_ravel[histo_bin]<4 and count>3 :
log.warning("stopping here")
break
count += 1
dwdy_best_bin,d2wdy2_best_bin,iy_best_bin,iw_best_bin = np.unravel_index(histo_bin, histogram.shape) # bin coord
#print("bins=",dwdy_best_bin,d2wdy2_best_bin,iy_best_bin,iw_best_bin)
# pairs of triplets in this histo bin
w_id=np.array([])
y_id=np.array([])
wok=np.where(w_triplets[:,3]==iw_best_bin)[0]
yok=np.where(y_triplets[:,3]==iy_best_bin)[0]
for w_triplet in w_triplets[wok] :
#d2wdy2 = idet[yok]*(dy_13[yok]*w_triplet[6]-dy_12[yok]*w_triplet[7])
#dwdy = idet[yok]*(-cdy2_13[yok]*w_triplet[6]+cdy2_12[yok]*w_triplet[7])
# bins in the histogram
dwdy_bin = ((idet[yok]*(-cdy2_13[yok]*w_triplet[6]+cdy2_12[yok]*w_triplet[7])-dwdy_min)/dwdy_step).astype(int)
d2wdy2_bin = ((idet[yok]*(dy_13[yok]*w_triplet[6]-dy_12[yok]*w_triplet[7])-d2wdy2_min)/d2wdy2_step).astype(int)
wyok=yok[np.where((dwdy_bin==dwdy_best_bin)&(d2wdy2_bin==d2wdy2_best_bin))[0]]
for y_triplet in y_triplets[wyok] :
y_id=np.append(y_id,y_triplet[3:6])
w_id=np.append(w_id,w_triplet[3:6])
# now need to rm duplicates
nw=len(w)
ny=len(y)
unique_common_id=np.unique(y_id.astype(int)*nw+w_id.astype(int))
        y_id=(unique_common_id//nw).astype(int)
w_id=(unique_common_id%nw).astype(int)
ordering=np.argsort(y[y_id])
y_id=y_id[ordering]
w_id=w_id[ordering]
# refine
y_id,w_id,rms,niter=refine_solution(y,w,y_id,w_id)
#log.info("get solution with %d match and rms=%f (niter=%d)"%(len(y_id),rms,niter))
if (len(y_id)>len(best_y_id) and rms<max(1,best_rms)) or (len(y_id)==len(best_y_id) and rms<best_rms) or (best_rms>1 and rms<1 and len(y_id)>=8) :
#log.info("new best solution #%d with %d match and rms=%f (niter=%d)"%(count,len(y_id),rms,niter))
#log.info("previous had %d match and rms=%f"%(len(best_y_id),best_rms))
best_y_id = y_id
best_w_id = w_id
best_rms = rms
# stop at some moment
if best_rms<0.2 and len(y_id)>=min(15,min(len(y),len(w))) :
#log.info("stop here because we have a correct solution")
break
if len(y) != len(id_dict["pixpk"]) :
#log.info("re-indexing the result")
tmp_y_id = []
for i in best_y_id :
tmp_y_id.append(np.argmin(np.abs(id_dict["pixpk"]-y[i])))
best_y_id = np.array(tmp_y_id).astype(int)
y = id_dict["pixpk"]
if len(best_w_id) == 0 :
log.error("failed, no match")
id_dict["status"]="failed"
id_dict["id_idx"]=[]
id_dict["id_pix"]=[]
id_dict["id_wave"]=[]
id_dict["rms"]=999.
id_dict["fit"]=None
return
id_dict["status"]="ok"
id_dict["id_idx"]=best_y_id
id_dict["id_pix"]=y[best_y_id]
id_dict["id_wave"]=w[best_w_id]
id_dict["rms"]=best_rms
deg=max(1,min(3,best_y_id.size-2))
id_dict["fit"]= dufits.func_fit(w[best_w_id],y[best_y_id],'polynomial',deg,xmin=0.,xmax=1.)
log.info("{:d} matched for {:d} detected and {:d} known as good, rms = {:g}".format(len(best_y_id),len(y),len(w),best_rms))
########################################################
# Linelist routines
########################################################
def parse_nist(ion, vacuum=True):
"""Parse a NIST ASCII table.
Note that the long ---- should have
been commented out and also the few lines at the start.
Taken from PYPIT
Parameters
----------
ion : str
Name of ion
    vacuum : bool, optional
Use vacuum wavelengths
"""
log=get_logger()
# Find file
medium = 'vacuum'
if not vacuum:
log.info("Using air wavelengths")
medium = 'air'
srch_file = "data/arc_lines/{0}_{1}.ascii".format(ion, medium)
if not resource_exists('desispec', srch_file):
log.error("Cannot find NIST file {:s}".format(srch_file))
raise Exception("Cannot find NIST file {:s}".format(srch_file))
# Read, while working around non-ASCII characters in NIST line lists
nist_file = resource_filename('desispec', srch_file)
log.info("reading NIST file {:s}".format(nist_file))
default_locale = locale.getlocale(locale.LC_CTYPE)
locale.setlocale(locale.LC_CTYPE, 'en_US.UTF-8')
nist_tbl = Table.read(nist_file, format='ascii.fixed_width')
locale.setlocale(locale.LC_CTYPE, default_locale)
gdrow = nist_tbl['Observed'] > 0. # Eliminate dummy lines
nist_tbl = nist_tbl[gdrow]
# Now unique values only (no duplicates)
uniq, indices = np.unique(nist_tbl['Observed'],return_index=True)
nist_tbl = nist_tbl[indices]
# Deal with Rel
agdrel = []
for row in nist_tbl:
        try:
            gdrel = int(row['Rel.'])
        except (ValueError, TypeError):
            try:
                gdrel = int(row['Rel.'][:-1])
            except (ValueError, TypeError):
                gdrel = 0
agdrel.append(gdrel)
agdrel = np.array(agdrel)
# Remove and add
nist_tbl.remove_column('Rel.')
nist_tbl.remove_column('Ritz')
nist_tbl.add_column(Column(agdrel,name='RelInt'))
nist_tbl.add_column(Column([ion]*len(nist_tbl), name='Ion', dtype=(str, 5)))
nist_tbl.rename_column('Observed','wave')
# Return
return nist_tbl
def load_arcline_list(camera, vacuum=True,lamps=None):
"""Loads arc line list from NIST files
Parses and rejects
Taken from PYPIT
    Parameters
    ----------
    camera : str
        Camera name; the first letter ('b', 'r' or 'z') selects the lamps
    vacuum : bool, optional
        Use vacuum wavelengths
    lamps : optional numpy array of ions, ex np.array(["HgI","CdI","ArI","NeI"])
Returns
-------
alist : Table
Table of arc lines
"""
log=get_logger()
wvmnx = None
if lamps is None :
if camera[0] == 'b':
lamps = ['CdI','ArI','HgI','NeI','KrI']
elif camera[0] == 'r':
lamps = ['CdI','ArI','HgI','NeI','KrI']
elif camera[0] == 'z':
lamps = ['CdI','ArI','HgI','NeI','KrI','XeI']
elif camera == 'all': # Used for specex
lamps = ['CdI','ArI','HgI','NeI','KrI','XeI']
        else:
            log.error("Not ready for this camera")
            raise ValueError("camera {} not supported".format(camera))
# Get the parse dict
parse_dict = load_parse_dict()
# Read rejection file
medium = 'vacuum'
if not vacuum:
log.info("Using air wavelengths")
medium = 'air'
rej_file = resource_filename('desispec', "data/arc_lines/rejected_lines_{0}.yaml".format(medium))
with open(rej_file, 'r') as infile:
rej_dict = yaml.safe_load(infile)
# Loop through the NIST Tables
tbls = []
for iline in lamps:
# Load
tbl = parse_nist(iline, vacuum=vacuum)
# Parse
if iline in parse_dict:
tbl = parse_nist_tbl(tbl,parse_dict[iline])
# Reject
if iline in rej_dict:
log.info("Rejecting select {:s} lines".format(iline))
tbl = reject_lines(tbl,rej_dict[iline])
#print("DEBUG",iline)
#print("DEBUG",tbl[['Ion','wave','RelInt']])
tbls.append(tbl[['Ion','wave','RelInt']])
# Stack
alist = vstack(tbls)
# wvmnx?
if wvmnx is not None:
print('Cutting down line list by wvmnx: {:g},{:g}'.format(wvmnx[0],wvmnx[1]))
gdwv = (alist['wave'] >= wvmnx[0]) & (alist['wave'] <= wvmnx[1])
alist = alist[gdwv]
# Return
return alist
def reject_lines(tbl,rej_dict, rej_tol=0.1):
"""Rejects lines from a NIST table
Taken from PYPIT
Parameters
----------
tbl : Table
Read previously from NIST ASCII file
rej_dict : dict
Dict of rejected lines
rej_tol : float, optional
Tolerance for matching a line to reject to linelist (Angstroms)
Returns
-------
tbl : Table
Rows not rejected
"""
    msk = tbl['wave'] == tbl['wave']  # initialize mask to all True
# Loop on rejected lines
for wave in rej_dict:
close = np.where(np.abs(wave-tbl['wave']) < rej_tol)[0]
if rej_dict[wave] == 'all':
msk[close] = False
else:
raise ValueError('Not ready for this')
# Return
return tbl[msk]
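# A tiny sketch of reject_lines: drop one HgI wavelength from a three-row
# table. The wavelengths are standard HgI lines used only as examples.
def _example_reject_lines():
    tbl = Table([Column([4046.56, 4358.33, 5460.74], name='wave')])
    return reject_lines(tbl, {4358.33: 'all'})   # two rows survive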
def parse_nist_tbl(tbl,parse_dict):
"""Parses a NIST table using various criteria
Parameters
----------
tbl : Table
Read previously from NIST ASCII file
parse_dict : dict
Dict of parsing criteria. Read from load_parse_dict
Returns
-------
tbl : Table
Rows meeting the criteria
"""
# Parse
gdI = tbl['RelInt'] >= parse_dict['min_intensity']
gdA = tbl['Aki'] >= parse_dict['min_Aki']
gdw = tbl['wave'] >= parse_dict['min_wave']
# Combine
allgd = gdI & gdA & gdw
# Return
return tbl[allgd]
def load_parse_dict():
"""Dicts for parsing Arc line lists from NIST
Rejected lines are in the rejected_lines.yaml file
"""
dict_parse = dict(min_intensity=0., min_Aki=0., min_wave=0.)
arcline_parse = {}
# ArI
arcline_parse['ArI'] = copy.deepcopy(dict_parse)
arcline_parse['ArI']['min_intensity'] = 1000. # NOT PICKING UP REDDEST LINES
# HgI
arcline_parse['HgI'] = copy.deepcopy(dict_parse)
arcline_parse['HgI']['min_intensity'] = 800.
# HeI
arcline_parse['HeI'] = copy.deepcopy(dict_parse)
arcline_parse['HeI']['min_intensity'] = 20.
# NeI
arcline_parse['NeI'] = copy.deepcopy(dict_parse)
arcline_parse['NeI']['min_intensity'] = 999.
#arcline_parse['NeI']['min_Aki'] = 1. # NOT GOOD FOR DEIMOS, DESI
#arcline_parse['NeI']['min_wave'] = 5700.
arcline_parse['NeI']['min_wave'] = 5850. # NOT GOOD FOR DEIMOS?
# ZnI
arcline_parse['ZnI'] = copy.deepcopy(dict_parse)
arcline_parse['ZnI']['min_intensity'] = 50.
# KrI
arcline_parse['KrI'] = copy.deepcopy(dict_parse)
arcline_parse['KrI']['min_intensity'] = 50.
return arcline_parse
def load_gdarc_lines(camera, llist, vacuum=True,lamps=None,good_lines_filename=None):
"""Loads a select set of arc lines for initial calibrating
Parameters
----------
camera : str
Camera ('b', 'g', 'r')
llist : table of lines to use, with columns Ion, wave
vacuum : bool, optional
Use vacuum wavelengths
lamps : optional numpy array of ions, ex np.array(["HgI","CdI","ArI","NeI"])
Returns
-------
dlamb : float
Dispersion for input camera
wmark : float
wavelength to key off of [???]
gd_lines : ndarray
Array of lines expected to be recorded and good for ID
line_guess : int or None
Guess at the line index corresponding to wmark (default is to guess the 1/2 way point)
"""
log=get_logger()
if lamps is None :
lamps=np.array(["HgI","CdI","ArI","NeI"])
lines={}
dlamb=0.6
if camera[0] == 'b':
dlamb = 0.589
elif camera[0] == 'r':
dlamb = 0.527
elif camera[0] == 'z':
#dlamb = 0.599 # Ang
dlamb = 0.608 # Ang (from teststand, ranges (fiber & wave) from 0.54 to 0.66)
# read good lines
if good_lines_filename is not None :
filename = good_lines_filename
else :
if vacuum :
filename = resource_filename('desispec', "data/arc_lines/goodlines_vacuum.ascii")
else :
filename = resource_filename('desispec', "data/arc_lines/goodlines_air.ascii")
log.info("Reading good lines in {:s}".format(filename))
lines={}
ifile=open(filename)
for line in ifile.readlines() :
if line[0]=="#" :
continue
vals=line.strip().split()
if len(vals)<3 :
log.warning("ignoring line '{:s}' in {:s}".format(line.strip(),filename))
continue
cameras=vals[2]
if cameras.find(camera[0].upper()) < 0 :
continue
ion=vals[1]
wave=float(vals[0])
if ion in lines:
lines[ion].append(wave)
else :
lines[ion]=[wave,]
ifile.close()
log.info("Good lines = {:s}".format(str(lines)))
log.info("Checking consistency with full line list")
nbad=0
for ion in lines:
ii=np.where(llist["Ion"]==ion)[0]
if ii.size == 0 :
continue
all_waves=np.array(llist["wave"][ii])
for j,w in enumerate(lines[ion]) :
i=np.argmin(np.abs(w-all_waves))
if np.abs(w-all_waves[i])>0.2 :
log.error("cannot find good line {:f} of {:s} in full line list. nearest is {:f}".format(w,ion,all_waves[i]))
nbad += 1
elif np.abs(w-all_waves[i])>0.001 :
log.warning("adjusting hardcoded {:s} line {:f} -> {:f} (the NIST line list is the truth)".format(w,ion,all_waves[i]))
lines[ion][j]=all_waves[i]
if nbad>0 :
log.error("{:d} inconsistent hardcoded lines, exiting".format(nbad))
sys.exit(12)
gd_lines=np.array([])
for lamp in lamps :
if lamp in lines:
gd_lines=np.append(gd_lines,lines[lamp])
# Sort and return
gd_lines.sort()
return dlamb, gd_lines
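# A sketch of the typical pairing of the loaders above: the full NIST-based
# list for a blue camera, then the curated "good" subset and dispersion used
# to seed the wavelength solution. Assumes the desispec line-list data files
# are installed.
def _example_load_lines():
    llist = load_arcline_list('b')
    dlamb, gd_lines = load_gdarc_lines('b', llist)
    return dlamb, gd_lines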
########################################################
# Fiber routines
########################################################
def fiber_gauss(flat, xtrc, xerr, box_radius=2, max_iter=5, debug=False, verbose=False) :
return fiber_gauss_new(flat, xtrc, xerr, box_radius, max_iter)
def fiber_gauss_new(flat, xtrc, xerr, box_radius=2, max_iter=5, debug=False, verbose=False):
"""Find the PSF sigma for each fiber
This serves as an initial guess to what follows
Parameters
----------
flat : ndarray of fiber flat image
xtrc: ndarray of fiber traces
xerr: ndarray of error in fiber traces
    box_radius: int, optional
Radius of boxcar extraction in pixels
max_iter : int, optional
Maximum number of iterations for rejection
Returns
-------
gauss
list of Gaussian sigma
"""
log=get_logger()
npix_y = flat.shape[0]
npix_x = flat.shape[1]
ny = xtrc.shape[0] # number of ccd rows in trace
assert(ny==npix_y)
nfiber = xtrc.shape[1]
minflux=1. # minimal flux in a row to include in the fit
# Loop on fibers
gauss = []
start = 0
for ii in range(nfiber):
if (ii % 25 == 0): # & verbose:
stop=time.time()
if start==0 :
log.info("Working on fiber {:d} of {:d}".format(ii,nfiber))
else :
log.info("Working on fiber %d of %d (25 done in %3.2f sec)"%(ii,nfiber,stop-start))
start=stop
# collect data
central_xpix=np.floor(xtrc[:,ii]+0.5)
begin_xpix=(central_xpix-box_radius).astype(int)
end_xpix=(central_xpix+box_radius+1).astype(int)
dx=[]
flux=[]
for y in range(ny) :
yflux=np.zeros(2*box_radius+1)
tmp=flat[y,begin_xpix[y]:end_xpix[y]]
yflux[:tmp.size] = tmp
syflux=np.sum(yflux)
if syflux<minflux :
continue
dx.append(np.arange(begin_xpix[y],end_xpix[y])-(xtrc[y,ii]))
flux.append(yflux/syflux)
dx=np.array(dx)
flux=np.array(flux)
# compute profile
# one way to get something robust is to compute median in bins
# it's a bit biasing but the PSF is not a Gaussian anyway
bins=np.linspace(-box_radius,box_radius,100)
bstep=bins[1]-bins[0]
bdx=[]
bflux=[]
for b in bins :
ok=(dx>=b)&(dx<(b+bstep))
if np.sum(ok)>1 :
bdx.append(np.mean(dx[ok]))
bflux.append(np.median(flux[ok]))
if len(bdx)<10 :
log.error("sigma fit failed for fiber #%02d"%ii)
log.error("this should only occur for the fiber near the center of the detector (if at all)")
log.error("using the sigma value from the previous fiber")
gauss.append(gauss[-1])
continue
# this is the profile :
bdx=np.array(bdx)
bflux=np.array(bflux)
# fast iterative gaussian fit
sigma = 1.0
sq2 = math.sqrt(2.)
for i in range(10) :
nsigma = sq2*np.sqrt(np.mean(bdx**2*bflux*np.exp(-bdx**2/2/sigma**2))/np.mean(bflux*np.exp(-bdx**2/2/sigma**2)))
if abs(nsigma-sigma) < 0.001 :
break
sigma = nsigma
gauss.append(sigma)
return np.array(gauss)
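# A sketch of fiber_gauss_new on a synthetic flat containing one fiber with
# a Gaussian cross-profile (sigma = 1.2 pix) whose trace wanders slightly so
# that the binned profile is well sampled. All values are example choices.
def _example_fiber_gauss():
    ny, nx = 200, 41
    xpix = np.arange(nx)
    xc = 20. + 0.3 * np.sin(np.linspace(0., 6., ny))   # wandering center
    flat = 1000. * np.exp(-0.5 * ((xpix[None, :] - xc[:, None]) / 1.2)**2)
    xtrc = xc.reshape(ny, 1)               # one fiber, known trace
    xerr = np.full((ny, 1), 0.1)
    return fiber_gauss_new(flat, xtrc, xerr)   # roughly array([1.2])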
def fiber_gauss_old(flat, xtrc, xerr, box_radius=2, max_iter=5, debug=False, verbose=False):
"""Find the PSF sigma for each fiber
This serves as an initial guess to what follows
Parameters
----------
flat : ndarray of fiber flat image
xtrc: ndarray of fiber traces
xerr: ndarray of error in fiber traces
    box_radius: int, optional
Radius of boxcar extraction in pixels
max_iter : int, optional
Maximum number of iterations for rejection
Returns
-------
gauss
list of Gaussian sigma
"""
log=get_logger()
log.warning("fiber_gauss uses astropy.modeling. Consider an alternative")
# Init
nfiber = xtrc.shape[1]
ny = xtrc.shape[0]
iy = np.arange(ny).astype(int)
# Mask
mask = np.zeros_like(flat,dtype=int)
# Sub images
xpix_img = np.outer(np.ones(flat.shape[0]),np.arange(flat.shape[1]))
# Gaussian fit
g_init = models.Gaussian1D(amplitude=1., mean=0., stddev=1.)
g_init.amplitude.fixed = True
g_init.mean.fixed = True
fitter = fitting.LevMarLSQFitter()
# Loop on fibers
gauss = []
start = 0
for ii in range(nfiber):
if (ii % 25 == 0): # & verbose:
stop=time.time()
if start==0 :
log.info("Working on fiber {:d} of {:d}".format(ii,nfiber))
else :
log.info("Working on fiber %d of %d (done 25 in %3.2f sec)"%(ii,nfiber,stop-start))
start=stop
mask[:] = 0
ixt = np.round(xtrc[:,ii]).astype(int)
for jj,ibox in enumerate(range(-box_radius,box_radius+1)):
ix = ixt + ibox
try :
mask[iy,ix] = 1
except IndexError :
pass
dx_img = xpix_img - np.outer(xtrc[:,ii],np.ones(flat.shape[1]))
# Sum
flux = np.sum(mask*flat,axis=1)
flux = np.maximum(flux,1.)
# Normalize
nrm_img = flat / np.outer(flux,np.ones(flat.shape[1]))
# Gaussian
cpix = np.where(np.abs(dx_img)<0.10)
if len(cpix[0]) < 50:
cpix = np.where(np.abs(dx_img)<0.40)
amp = np.median(nrm_img[cpix])
g_init.amplitude.value = amp # Fixed
fdimg = dx_img[mask==1].flatten()
fnimg = nrm_img[mask==1].flatten()
# Guess at sigma
gdfn = (fnimg < amp) & (fnimg > 0.)
all_sig = np.abs(fdimg[gdfn]) / np.sqrt( np.log(amp)-np.log(fnimg[gdfn]) )
g_init.stddev.value = np.median(all_sig[np.where((np.abs(fdimg[gdfn])>1) & (np.abs(fdimg[gdfn])<1.5) & (np.isfinite(all_sig)))])
# Initial fit (need to mask!)
parm = fitter(g_init, fdimg, fnimg)
# Iterate
iterate = True
nrej = 0
niter = 0
while iterate & (niter < max_iter):
# Clip
resid = parm(fdimg) - fnimg
resid_mask = sigma_clip(resid, sigma=4., maxiters=5)
# Fit
gdp = ~resid_mask.mask
parm = fitter(g_init, fdimg[gdp], fnimg[gdp])
# Again?
if np.sum(resid_mask.mask) <= nrej:
iterate = False
else:
nrej = np.sum(resid_mask.mask)
niter += 1
if verbose:
log.info("Rejected {:d} in {:d} iterations".format(nrej,niter))
#debug = False
if debug:
plt.clf()
plt.scatter(fdimg[gdp], fnimg[gdp])
x= np.linspace(-box_radius, box_radius, 200)
plt.plot(x, parm(x), 'r-')
plt.show()
plt.close()
pdb.set_trace()
# Save
gauss.append(parm.stddev.value)
#
return np.array(gauss)
def find_fiber_peaks(flat, ypos=None, nwidth=5, debug=False,thresh=None) :
"""Find the peaks of the fiber flat spectra
    Performs book-keeping error checking
Args:
flat : ndarray of fiber flat image
ypos : int [optional] Row for finding peaks
Default is half-way up the image
nwidth : int [optional] Width of peak (end-to-end)
debug: bool, optional
Returns:
        xpk : ndarray of peak x positions (nearest pixel) at ypos
        ypos : int, row used for the peak finding
        cut : ndarray, median cut through the image
"""
log=get_logger()
log.info("starting")
# Init
Nbundle = 20
Nfiber = 25 # Fibers per bundle
# Set ypos for peak finding
if ypos is None:
ypos = flat.shape[0]//2
# Cut image
cutimg = flat[ypos-50:ypos+50, :]
# Smash
cut = np.median(cutimg, axis=0)
# Set flux threshold
#srt = np.sort(cutimg.flatten()) # this does not work for sparse fibers
#thresh = srt[int(cutimg.size*0.95)] / 2. # this does not work for sparse fibers
if thresh is None :
thresh = np.max(cut)/20.
log.info("Threshold: {:f}".format(thresh))
pixels_below_threshold=np.where(cut<thresh)[0]
if pixels_below_threshold.size>2 :
values_below_threshold = sigma_clip(cut[pixels_below_threshold],sigma=3,maxiters=200)
if values_below_threshold.size>2 :
rms=np.std(values_below_threshold)
nsig=7
new_thresh=max(thresh,nsig*rms)
log.info("Threshold: {:f} -> {:f} ({:d}*rms: {:f})".format(thresh,new_thresh,nsig,nsig*rms))
thresh=new_thresh
else :
log.info("Using input threshold: {:f})".format(thresh))
#gdp = cut > thresh
# Roll to find peaks (simple algorithm)
#nstep = nwidth // 2
#for kk in range(-nstep,nstep):
# if kk < 0:
# test = np.roll(cut,kk) < np.roll(cut,kk+1)
# else:
# test = np.roll(cut,kk) > np.roll(cut,kk+1)
# # Compare
# gdp = gdp & test
#xpk = np.where(gdp)[0]
# Find clusters of adjacent points
clusters=[]
gdp=np.where(cut > thresh)[0]
cluster=[gdp[0]]
for i in gdp[1:] :
if i==cluster[-1]+1 :
cluster.append(i)
else :
clusters.append(cluster)
cluster=[i]
clusters.append(cluster)
log.info("Number of clusters found: {:d}".format(len(clusters)))
# Record max of each cluster
xpk=np.zeros((len(clusters)), dtype=np.int64)
for i in range(len(clusters)) :
t=np.argmax(cut[clusters[i]])
xpk[i]=clusters[i][t]
if debug:
#pdb.xplot(cut, xtwo=xpk, ytwo=cut[xpk],mtwo='o')
pdb.set_trace()
# Book-keeping and some error checking
if len(xpk) != Nbundle*Nfiber:
log.warning('Found the wrong number of total fibers: {:d}'.format(len(xpk)))
else:
log.info('Found {:d} fibers'.format(len(xpk)))
# Find bundles
xsep = np.roll(xpk,-1) - xpk
medsep = np.median(xsep)
bundle_ends = np.where(np.abs(xsep-medsep) > 0.5*medsep)[0]
if len(bundle_ends) != Nbundle:
log.warning('Found the wrong number of bundles: {:d}'.format(len(bundle_ends)))
else:
log.info('Found {:d} bundles'.format(len(bundle_ends)))
# Confirm correct number of fibers per bundle
bad = ((bundle_ends+1) % Nfiber) != 0
if np.sum(bad) > 0:
log.warning('Wrong number of fibers in a bundle')
#raise ValueError('Wrong number of fibers in a bundle')
# Return
return xpk, ypos, cut
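# Hedged demo (not part of the original module; all values illustrative):
# exercise the threshold + cluster peak finding used above on a synthetic
# 1D cut with five Gaussian fiber profiles.
def _demo_find_fiber_peaks_clusters():
    """Illustrate cluster-based peak finding on a toy 1D profile."""
    x = np.arange(100)
    cut = np.zeros(100)
    for center in (10, 30, 50, 70, 90):
        cut += 100. * np.exp(-0.5 * ((x - center) / 1.5) ** 2)
    thresh = np.max(cut) / 20.
    # group contiguous pixels above threshold into clusters
    gdp = np.where(cut > thresh)[0]
    clusters, cluster = [], [gdp[0]]
    for i in gdp[1:]:
        if i == cluster[-1] + 1:
            cluster.append(i)
        else:
            clusters.append(cluster)
            cluster = [i]
    clusters.append(cluster)
    # the peak of each cluster is its argmax; expect the five centers above
    xpk = np.array([c[np.argmax(cut[c])] for c in clusters])
    return xpk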
def fit_traces(xset, xerr, func='legendre', order=6, sigrej=20.,
RMS_TOLER=0.03, verbose=False):
"""Fit the traces
Default is 6th order Legendre polynomials
Parameters
----------
xset : ndarray
traces
xerr : ndarray
Error in the trace values (999.=Bad)
    RMS_TOLER : float, optional [0.03]
Tolerance on size of RMS in fit
Returns
-------
xnew, fits
xnew : ndarray
New fit values (without error)
fits : list
List of the fit dicts
"""
log=get_logger()
ny = xset.shape[0]
ntrace = xset.shape[1]
xnew = np.zeros_like(xset)
fits = []
yval = np.arange(ny)
for ii in range(ntrace):
mask = xerr[:,ii] > 900.
nmask = np.sum(mask)
# Fit with rejection
dfit, mask = dufits.iter_fit(yval, xset[:,ii], func, order, sig_rej=sigrej,
weights=1./xerr[:,ii], initialmask=mask, maxone=True)#, sigma=xerr[:,ii])
# Stats on residuals
nmask_new = np.sum(mask)-nmask
if nmask_new > 200:
log.error("Rejected many points ({:d}) in fiber {:d}".format(nmask_new, ii))
# Save
xnew[:,ii] = dufits.func_val(yval,dfit)
fits.append(dfit)
        # Residuals
gdval = mask==0
resid = xnew[:,ii][gdval] - xset[:,ii][gdval]
rms = np.std(resid)
if verbose:
print('RMS of FIT= {:g}'.format(rms))
if rms > RMS_TOLER:
#from xastropy.xutils import xdebug as xdb
#xdb.xplot(yval, xnew[:,ii], xtwo=yval[gdval],ytwo=xset[:,ii][gdval], mtwo='o')
log.error("RMS {:g} exceeded tolerance for fiber {:d}".format(rms, ii))
# Return
return xnew, fits
def extract_sngfibers_gaussianpsf(img, img_ivar, xtrc, sigma, box_radius=2, verbose=True):
"""Extract spectrum for fibers one-by-one using a Gaussian PSF
Parameters
----------
img : ndarray
Image
img_ivar : ndarray
Image inverse variance
xtrc : ndarray
fiber trace
sigma : float
Gaussian sigma for PSF
box_radius : int, optional
Radius for extraction (+/-)
Returns
-------
spec : ndarray
Extracted spectrum
"""
# Init
xpix_img = np.outer(np.ones(img.shape[0]),np.arange(img.shape[1]))
mask = np.zeros_like(img,dtype=int)
iy = np.arange(img.shape[0],dtype=int)
log = get_logger()
#
all_spec = np.zeros_like(xtrc)
cst = 1./np.sqrt(2*np.pi)
start=0
for qq in range(xtrc.shape[1]):
        if verbose and (qq % 25 == 0):
stop=time.time()
if start>0 :
log.info("Working on fiber %d of %d (done 25 in %3.2f sec)"%(qq,xtrc.shape[1],stop-start))
else :
log.info("Working on fiber %d of %d"%(qq,xtrc.shape[1]))
start=stop
# Mask
mask[:,:] = 0
ixt = np.round(xtrc[:,qq]).astype(int)
for jj,ibox in enumerate(range(-box_radius,box_radius+1)):
ix = ixt + ibox
try :
mask[iy,ix] = 1
except IndexError :
pass
# Sub-image (for speed, not convenience)
gdp = np.where(mask == 1)
if len(gdp[1])<2: continue
minx = np.min(gdp[1])
maxx = np.max(gdp[1])
nx = (maxx-minx)+1
# Generate PSF
dx_img = xpix_img[:,minx:maxx+1] - np.outer(xtrc[:,qq], np.ones(nx))
psf = cst*np.exp(-0.5 * (dx_img/sigma[qq])**2)/sigma[qq]
#dx_img = xpix_img[:,minx:maxx+1] - np.outer(xtrc[:,qq],np.ones(img.shape[1]))
#g_init = models.Gaussian1D(amplitude=1., mean=0., stddev=sigma[qq])
#psf = mask * g_init(dx_img)
# Extract
#all_spec[:,qq] = np.sum(psf*img,axis=1) / np.sum(psf,axis=1)
#all_spec[:,qq] = np.sum(psf*img[:,minx:maxx+1],axis=1) / np.sum(psf,axis=1)
a=np.sum(img_ivar[:,minx:maxx+1]*psf**2,axis=1)
b=np.sum(img_ivar[:,minx:maxx+1]*psf*img[:,minx:maxx+1],axis=1)
ok=(a>1.e-6)
all_spec[ok,qq] = b[ok] / a[ok]
#import astropy.io.fits as pyfits
#h=pyfits.HDUList([pyfits.PrimaryHDU(),
# pyfits.ImageHDU(img[:,minx:maxx+1],name="FLUX"),
# pyfits.ImageHDU(img_ivar[:,minx:maxx+1],name="IVAR"),
# pyfits.ImageHDU(psf,name="PSF"),
# pyfits.ImageHDU(a,name="A"),
# pyfits.ImageHDU(b,name="B")])
#h.writeto("test.fits")
#sys.exit(12)
# Return
return all_spec
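# Hedged sketch (illustrative only): the extraction above is the standard
# inverse-variance weighted amplitude estimate for a known profile,
# amp = sum(ivar * psf * data) / sum(ivar * psf**2), applied row by row.
def _demo_weighted_psf_amplitude():
    """Recover the amplitude of a noiseless Gaussian profile exactly."""
    x = np.linspace(-3., 3., 13)
    sigma = 1.2
    psf = np.exp(-0.5 * (x / sigma)**2) / (sigma * np.sqrt(2 * np.pi))
    true_amp = 50.
    data = true_amp * psf        # noiseless pixel values
    ivar = np.ones_like(data)    # uniform weights for the demo
    a = np.sum(ivar * psf**2)
    b = np.sum(ivar * psf * data)
    return b / a                 # equals true_amp up to float precision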
def trace_crude_init(image, xinit0, ypass, invvar=None, radius=2.,
maxshift0=0.5, maxshift=0.15, maxerr=0.2):
# xset, xerr, maxerr, maxshift, maxshift0
"""Python port of trace_crude_idl.pro from IDLUTILS
Modified for initial guess
Parameters
----------
image : 2D ndarray
Image for tracing
    xinit0 : ndarray
Initial guesses for trace peak at ypass
ypass : int
Row for initial guesses
Returns
-------
xset : Trace for each fiber
xerr : Estimated error in that trace
"""
# Init
xinit = xinit0.astype(float)
#xinit = xinit[0:3]
ntrace = xinit.size
ny = image.shape[0]
xset = np.zeros((ny,ntrace))
xerr = np.zeros((ny,ntrace))
if invvar is None:
invvar = np.zeros_like(image) + 1.
#
# Recenter INITIAL Row for all traces simultaneously
#
iy = ypass * np.ones(ntrace,dtype=int)
xfit,xfiterr = trace_fweight(image, xinit, iy, invvar=invvar, radius=radius)
# Shift
xshift = np.clip(xfit-xinit, -1*maxshift0, maxshift0) * (xfiterr < maxerr)
xset[ypass,:] = xinit + xshift
xerr[ypass,:] = xfiterr * (xfiterr < maxerr) + 999.0 * (xfiterr >= maxerr)
# /* LOOP FROM INITIAL (COL,ROW) NUMBER TO LARGER ROW NUMBERS */
for iy in range(ypass+1, ny):
xinit = xset[iy-1, :]
ycen = iy * np.ones(ntrace,dtype=int)
xfit,xfiterr = trace_fweight(image, xinit, ycen, invvar=invvar, radius=radius)
# Shift
xshift = np.clip(xfit-xinit, -1*maxshift, maxshift) * (xfiterr < maxerr)
# Save
xset[iy,:] = xinit + xshift
xerr[iy,:] = xfiterr * (xfiterr < maxerr) + 999.0 * (xfiterr >= maxerr)
# /* LOOP FROM INITIAL (COL,ROW) NUMBER TO SMALLER ROW NUMBERS */
for iy in range(ypass-1, -1,-1):
xinit = xset[iy+1, :]
ycen = iy * np.ones(ntrace,dtype=int)
xfit,xfiterr = trace_fweight(image, xinit, ycen, invvar=invvar, radius=radius)
# Shift
xshift = np.clip(xfit-xinit, -1*maxshift, maxshift) * (xfiterr < maxerr)
# Save
xset[iy,:] = xinit + xshift
xerr[iy,:] = xfiterr * (xfiterr < maxerr) + 999.0 * (xfiterr >= maxerr)
return xset, xerr
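# Hedged note (descriptive only): trace_crude_init propagates the solution
# row by row away from ypass, recentering each fiber with trace_fweight and
# clipping the per-row shift to +/- maxshift (maxshift0 for the initial row);
# rows whose centroid error exceeds maxerr keep the previous position and are
# flagged with xerr = 999 so that downstream fits can mask them.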
def trace_fweight(fimage, xinit, ycen=None, invvar=None, radius=2., debug=False):
'''Python port of trace_fweight.pro from IDLUTILS
Parameters
----------
fimage: 2D ndarray
Image for tracing
xinit: ndarray
Initial guesses for x-trace
invvar: ndarray, optional
Inverse variance array for the image
radius: float, optional
        Radius for centroiding; default to 2.0
'''
# Definitions for Cython
#cdef int nx,ny,ncen
# Init
nx = fimage.shape[1]
ny = fimage.shape[0]
ncen = len(xinit)
# Create xnew, xerr
xnew = xinit.astype(float)
xerr = np.zeros(ncen) + 999.
# ycen
if ycen is None:
if ncen != ny:
raise ValueError('Bad input')
ycen = np.arange(ny, dtype=int)
else:
if len(ycen) != ncen:
raise ValueError('Bad ycen input. Wrong length')
x1 = xinit - radius + 0.5
x2 = xinit + radius + 0.5
ix1 = np.floor(x1).astype(int)
ix2 = np.floor(x2).astype(int)
fullpix = int(np.maximum(np.min(ix2-ix1)-1,0))
sumw = np.zeros(ncen)
sumxw = np.zeros(ncen)
sumwt = np.zeros(ncen)
sumsx1 = np.zeros(ncen)
sumsx2 = np.zeros(ncen)
qbad = np.array([False]*ncen)
if invvar is None:
invvar = np.zeros_like(fimage) + 1.
# Compute
for ii in range(0,fullpix+3):
spot = ix1 - 1 + ii
ih = np.clip(spot,0,nx-1)
xdiff = spot - xinit
#
wt = np.clip(radius - np.abs(xdiff) + 0.5,0,1) * ((spot >= 0) & (spot < nx))
sumw = sumw + fimage[ycen,ih] * wt
sumwt = sumwt + wt
sumxw = sumxw + fimage[ycen,ih] * xdiff * wt
var_term = wt**2 / (invvar[ycen,ih] + (invvar[ycen,ih] == 0))
sumsx2 = sumsx2 + var_term
sumsx1 = sumsx1 + xdiff**2 * var_term
#qbad = qbad or (invvar[ycen,ih] <= 0)
qbad = np.any([qbad, invvar[ycen,ih] <= 0], axis=0)
if debug:
pdb.set_trace()
# Fill up
good = (sumw > 0) & (~qbad)
if np.sum(good) > 0:
delta_x = sumxw[good]/sumw[good]
xnew[good] = delta_x + xinit[good]
xerr[good] = np.sqrt(sumsx1[good] + sumsx2[good]*delta_x**2)/sumw[good]
bad = np.any([np.abs(xnew-xinit) > radius + 0.5,xinit < radius - 0.5,xinit > nx - 0.5 - radius],axis=0)
if np.sum(bad) > 0:
xnew[bad] = xinit[bad]
xerr[bad] = 999.0
# Return
return xnew, xerr
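# Hedged sketch (illustrative only): the 1D analogue of the flux-weighted
# recentering above is x_new = x_init + sum(wt*flux*(x-x_init)) / sum(wt*flux),
# with triangular weights wt that taper to zero at the window edges.
def _demo_flux_weighted_centroid(profile, xinit, radius=2.):
    """Flux-weighted recentering of a 1D peak near pixel xinit."""
    x = np.arange(profile.size)
    wt = np.clip(radius - np.abs(x - xinit) + 0.5, 0, 1)
    sumw = np.sum(wt * profile)
    if sumw <= 0:
        return xinit  # no flux in the window: keep the initial guess
    return xinit + np.sum(wt * profile * (x - xinit)) / sumw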
def fix_ycoeff_outliers(xcoeff, ycoeff, deg=5, tolerance=2):
'''
Fix outliers in coefficients for wavelength solution, assuming a continuous function of CCD coordinates
Args:
xcoeff[nfiber, ncoeff] : 2D array of Legendre coefficients for X(wavelength)
ycoeff[nfiber, ncoeff] : 2D array of Legendre coefficients for Y(wavelength)
Options:
deg : integer degree of polynomial to fit
tolerance : replace fibers with difference of wavelength solution larger than this number of pixels after interpolation
Returns:
new_ycoeff[nfiber, ncoeff] with outliers replaced by interpolations
For each coefficient, fit a polynomial vs. fiber number with one
    pass of sigma clipping. Remaining outliers are then replaced with
the interpolated fit value.
'''
log = get_logger()
nfibers=ycoeff.shape[0]
if nfibers < 3 :
log.warning("only {:d} fibers, cannot interpolate coefs".format(nfibers))
return ycoeff
deg=min(deg,nfibers-1)
nwave=ycoeff.shape[1]+1
wave_nodes = np.linspace(-1,1,nwave)
# get traces using fit coefs
x=np.zeros((nfibers,nwave))
y=np.zeros((nfibers,nwave))
for i in range(nfibers) :
x[i] = legval(wave_nodes,xcoeff[i])
y[i] = legval(wave_nodes,ycoeff[i])
new_ycoeff=ycoeff.copy()
bad_fibers=None
while True : # loop to discard one fiber at a time
# polynomial fit as a function of x for each wave
yf=np.zeros((nfibers,nwave))
xx=2*(x - np.min(x)) / (np.max(x) - np.min(x)) - 1
for i in range(nwave) :
c=np.polyfit(xx[:,i], y[:,i], deg)
yf[:,i]=np.polyval(c, xx[:,i])
diff=np.max(np.abs(y-yf),axis=1)
for f in range(nfibers) :
log.info("fiber {:d} maxdiff= {:f}".format(f,diff[f]))
worst = np.argmax(diff)
if diff[worst] > tolerance :
log.warning("replace fiber {:d} trace by interpolation".format(worst))
leg_fit = dufits.func_fit(wave_nodes, yf[worst], 'legendre', ycoeff.shape[1]-1, xmin=-1, xmax=1)
new_ycoeff[worst] = leg_fit['coeff']
y[worst] = legval(wave_nodes,new_ycoeff[worst])
if bad_fibers is None :
bad_fibers = np.array([worst])
else :
bad_fibers=np.append(bad_fibers, worst)
bad_fibers=np.unique(bad_fibers)
continue
break
return new_ycoeff
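# Hedged sketch (illustrative only): the repair loop above amounts to fitting
# a low-order polynomial across fibers and replacing the single most
# discrepant fiber with the fit, repeating until all residuals are within
# tolerance. A minimal 1D version of that pattern:
def _demo_replace_worst_outlier(y, deg=3, tolerance=2., max_iter=100):
    """Replace entries of y deviating more than tolerance from a polyfit."""
    x = np.arange(y.size, dtype=float)
    y = y.copy()
    for _ in range(max_iter):
        fit = np.polyval(np.polyfit(x, y, deg), x)
        worst = np.argmax(np.abs(y - fit))
        if np.abs(y[worst] - fit[worst]) <= tolerance:
            break
        y[worst] = fit[worst]  # interpolate the worst outlier, then refit
    return y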
#####################################################################
#####################################################################
# Output
#####################################################################
def write_psf(outfile, xfit, fdicts, gauss, wv_solns, legendre_deg=5, without_arc=False,
XCOEFF=None, fiberflat_header=None, arc_header=None, fix_ycoeff=True):
""" Write the output to a Base PSF format
Parameters
----------
outfile : str
Output file
xfit : ndarray
Traces
gauss : list
List of gaussian sigmas
fdicts : list
List of trace fits
wv_solns : list
List of wavelength calibrations
    legendre_deg : int
        Degree of the Legendre polynomial fits (ncoeff = legendre_deg + 1)
"""
#
# check legendre degree makes sense based on number of lines
if not without_arc:
nlines=10000
for ii,id_dict in enumerate(wv_solns):
if len(id_dict['id_pix']) > 0 :
nlines_in_fiber=(np.array(id_dict['id_pix'])[id_dict['mask']==0]).size
#print("fiber #%d nlines=%d"%(ii,nlines_in_fiber))
nlines=min(nlines,nlines_in_fiber)
if nlines < legendre_deg+2 :
legendre_deg=nlines-2
print("reducing legendre degree to %d because the min. number of emission lines found is %d"%(legendre_deg,nlines))
ny = xfit.shape[0]
nfiber = xfit.shape[1]
ncoeff=legendre_deg+1
if XCOEFF is None:
XCOEFF = np.zeros((nfiber, ncoeff))
YCOEFF = np.zeros((nfiber, ncoeff))
# Find WAVEMIN, WAVEMAX
if without_arc:
WAVEMIN = 0.
WAVEMAX = ny-1.
wv_solns = [None]*nfiber
else:
WAVEMIN = 10000000.
WAVEMAX = 0.
for id_dict in wv_solns :
if 'wave_min' in id_dict :
WAVEMIN = min(WAVEMIN,id_dict['wave_min'])
if 'wave_max' in id_dict :
WAVEMAX = max(WAVEMAX,id_dict['wave_max'])
WAVEMIN -= 1.
WAVEMAX += 1.
wv_array = np.linspace(WAVEMIN, WAVEMAX, num=ny)
# Fit Legendre to y vs. wave
for ii,id_dict in enumerate(wv_solns):
# Fit y vs. wave
if without_arc:
yleg_fit, mask = dufits.iter_fit(wv_array, np.arange(ny), 'legendre', ncoeff-1, xmin=WAVEMIN, xmax=WAVEMAX, niter=1)
else:
if len(id_dict['id_wave']) > 0 :
yleg_fit, mask = dufits.iter_fit(np.array(id_dict['id_wave'])[id_dict['mask']==0], np.array(id_dict['id_pix'])[id_dict['mask']==0], 'legendre', ncoeff-1, xmin=WAVEMIN, xmax=WAVEMAX, sig_rej=100000.)
else :
yleg_fit = None
mask = None
if yleg_fit is None :
continue
YCOEFF[ii, :] = yleg_fit['coeff']
# Fit x vs. wave
yval = dufits.func_val(wv_array, yleg_fit)
if fdicts is None:
if XCOEFF is None:
raise IOError("Need to set either fdicts or XCOEFF!")
else:
xtrc = dufits.func_val(yval, fdicts[ii])
xleg_fit,mask = dufits.iter_fit(wv_array, xtrc, 'legendre', ncoeff-1, xmin=WAVEMIN, xmax=WAVEMAX, niter=5, sig_rej=100000.)
XCOEFF[ii, :] = xleg_fit['coeff']
# Fix outliers assuming that coefficients vary smoothly vs. CCD coordinates
if fix_ycoeff :
YCOEFF = fix_ycoeff_outliers(XCOEFF,YCOEFF,tolerance=2)
# Write the FITS file
prihdu = fits.PrimaryHDU(XCOEFF)
prihdu.header['WAVEMIN'] = WAVEMIN
prihdu.header['WAVEMAX'] = WAVEMAX
prihdu.header['EXTNAME'] = 'XTRACE'
prihdu.header['PSFTYPE'] = 'bootcalib'
from desiutil.depend import add_dependencies
add_dependencies(prihdu.header)
# Add informations for headers
if arc_header is not None :
if "NIGHT" in arc_header:
prihdu.header["ARCNIGHT"] = arc_header["NIGHT"]
if "EXPID" in arc_header:
prihdu.header["ARCEXPID"] = arc_header["EXPID"]
if "CAMERA" in arc_header:
prihdu.header["CAMERA"] = arc_header["CAMERA"]
prihdu.header['NPIX_X'] = arc_header['NAXIS1']
prihdu.header['NPIX_Y'] = arc_header['NAXIS2']
if fiberflat_header is not None :
if 'NPIX_X' not in prihdu.header:
prihdu.header['NPIX_X'] = fiberflat_header['NAXIS1']
prihdu.header['NPIX_Y'] = fiberflat_header['NAXIS2']
if "NIGHT" in fiberflat_header:
prihdu.header["FLANIGHT"] = fiberflat_header["NIGHT"]
if "EXPID" in fiberflat_header:
prihdu.header["FLAEXPID"] = fiberflat_header["EXPID"]
yhdu = fits.ImageHDU(YCOEFF, name='YTRACE')
# also save wavemin wavemax in yhdu
yhdu.header['WAVEMIN'] = WAVEMIN
yhdu.header['WAVEMAX'] = WAVEMAX
gausshdu = fits.ImageHDU(np.array(gauss), name='XSIGMA')
hdulist = fits.HDUList([prihdu, yhdu, gausshdu])
hdulist.writeto(outfile, overwrite=True)
def write_line_list(filename,all_wv_soln,llist) :
wave = np.array([])
for id_dict in all_wv_soln :
wave=np.append(wave,id_dict["id_wave"])
wave=np.unique(wave)
ofile=open(filename,"w")
ofile.write("# from bootcalib\n")
ofile.write("Ion wave score RelInt\n")
for w in wave :
ii=np.argmin(np.abs(llist["wave"]-w))
print(w,llist["wave"][ii],llist["Ion"][ii])
ofile.write("{:s} {:f} 1 1\n".format(llist["Ion"][ii],w))
ofile.close()
#####################################################################
#####################################################################
# Utilities
#####################################################################
def script_bootcalib(arc_idx, flat_idx, cameras=None, channels=None, nproc=10):
""" Runs desi_bootcalib on a series of preproc files
    Example:
        script_bootcalib([0,1,2,3,4,5,6,7,8,9], [10,11,12,13,14])
"""
from subprocess import Popen
#
if cameras is None:
cameras = ['0','1','2','3','4','5','6','7','8','9']
if channels is None:
channels = ['b','r','z']
#channels = ['b']#,'r','z']
nchannels = len(channels)
ncameras = len(cameras)
#
narc = len(arc_idx)
nflat = len(flat_idx)
ntrial = narc*nflat*ncameras*nchannels
# Loop on the systems
nrun = -1
#nrun = 123
while(nrun < ntrial):
proc = []
ofiles = []
for ss in range(nproc):
nrun += 1
iarc = nrun % narc
jflat = (nrun//narc) % nflat
kcamera = (nrun//(narc*nflat)) % ncameras
lchannel = nrun // (narc*nflat*ncameras)
#pdb.set_trace()
if nrun == ntrial:
break
# Names
#- TODO: update to use desispec.io.findfile instead
afile = str('preproc-{:s}{:s}-{:08d}.fits'.format(channels[lchannel], cameras[kcamera], arc_idx[iarc]))
ffile = str('preproc-{:s}{:s}-{:08d}.fits'.format(channels[lchannel], cameras[kcamera], flat_idx[jflat]))
ofile = str('boot_psf-{:s}{:s}-{:d}{:d}.fits'.format(channels[lchannel], cameras[kcamera],
arc_idx[iarc], flat_idx[jflat]))
qfile = str('qa_boot-{:s}{:s}-{:d}{:d}.pdf'.format(channels[lchannel], cameras[kcamera],
arc_idx[iarc], flat_idx[jflat]))
lfile = str('boot-{:s}{:s}-{:d}{:d}.log'.format(channels[lchannel], cameras[kcamera],
arc_idx[iarc], flat_idx[jflat]))
## Run
script = [str('desi_bootcalib.py'), str('--fiberflat={:s}'.format(ffile)),
str('--arcfile={:s}'.format(afile)),
str('--outfile={:s}'.format(ofile)),
str('--qafile={:s}'.format(qfile))]#,
#str('>'),
#str('{:s}'.format(lfile))]
f = open(lfile, "w")
proc.append(Popen(script, stdout=f))
ofiles.append(f)
exit_codes = [p.wait() for p in proc]
for ofile in ofiles:
ofile.close()
#####################################################################
#####################################################################
#####################################################################
# QA
#####################################################################
def qa_fiber_peaks(xpk, cut, pp=None, figsz=None, nper=100):
""" Generate a QA plot for the fiber peaks
Args:
xpk: x positions on the CCD of the fiber peaks at a ypos
cut: Spatial cut through the detector
pp: PDF file pointer
figsz: figure size, optional
nper: number of fibers per row in the plot, optional
"""
# Init
if figsz is None:
figsz = glbl_figsz
nfiber = xpk.size
nrow = (nfiber // nper) + ((nfiber % nper) > 0)
xplt = np.arange(cut.size)
# Plots
gs = gridspec.GridSpec(nrow, 1)
plt.figure(figsize=figsz)
# Loop
for ii in range(nrow):
ax = plt.subplot(gs[ii])
i0 = ii*nper
i1 = i0 + nper
ax.plot(xplt,cut, 'k-')
ax.plot(xpk, cut[xpk],'go')
xmin = np.min(xpk[i0:i1])-10.
xmax = np.max(xpk[i0:i1])+10.
ax.set_xlim(xmin,xmax)
# Save and close
if pp is not None:
pp.savefig(bbox_inches='tight')
else:
plt.show()
plt.close()
def qa_fiber_Dx(xfit, fdicts, pp=None, figsz=None):
""" Show the spread in the trace per fiber
Used to diagnose the traces
Args:
xfit: traces
fdicts: dict of the traces
pp: PDF file pointer
figsz: figure size, optional
"""
#
if figsz is None:
figsz = glbl_figsz
# Calculate Dx
nfiber = xfit.shape[1]
Dx = []
for ii in range(nfiber):
Dx.append(np.max(xfit[:, ii])-np.min(xfit[:, ii]))
# Plot
plt.figure(figsize=figsz)
plt.scatter(np.arange(nfiber), np.array(Dx))
# Label
plt.xlabel('Fiber', fontsize=17.)
plt.ylabel(r'$\Delta x$ (pixels)', fontsize=17.)
# Save and close
if pp is None:
plt.show()
else:
pp.savefig(bbox_inches='tight')
plt.close()
def qa_fiber_gauss(gauss, pp=None, figsz=None):
""" Show the Gaussian (sigma) fits to each fiber
Args:
gauss: Gaussian of each fiber
pp: PDF file pointer
figsz: figure size, optional
"""
#
if figsz is None:
figsz = glbl_figsz
    # Number of fibers
nfiber = gauss.size
# Plot
plt.figure(figsize=figsz)
plt.scatter(np.arange(nfiber), gauss)
# Label
plt.xlabel('Fiber', fontsize=17.)
plt.ylabel('Gaussian sigma (pixels)', fontsize=17.)
# Save and close
if pp is None:
plt.show()
else:
pp.savefig(bbox_inches='tight')
plt.close()
def qa_arc_spec(all_spec, all_soln, pp, figsz=None):
""" Generate QA plots of the arc spectra with IDs
Args:
all_spec: Arc 1D fiber spectra
all_soln: Wavelength solutions
pp: PDF file pointer
figsz: figure size, optional
"""
# Init
if figsz is None:
figsz = glbl_figsz
nfiber = len(all_soln)
npix = all_spec.shape[0]
#
nrow = 2
ncol = 3
# Plots
gs = gridspec.GridSpec(nrow, ncol)
plt.figure(figsize=figsz)
# Loop
for ii in range(nrow*ncol):
ax = plt.subplot(gs[ii])
idx = ii * (nfiber//(nrow*ncol))
yspec = np.log10(np.maximum(all_spec[:,idx],1))
ax.plot(np.arange(npix), yspec, 'k-')
ax.set_xlabel('Pixel')
ax.set_ylabel('log Flux')
# ID
id_dict = all_soln[idx]
for jj,xpixpk in enumerate(id_dict['id_pix']):
ax.text(xpixpk, yspec[int(np.round(xpixpk))], '{:g}'.format(id_dict['id_wave'][jj]), ha='center',color='red', rotation=90.)
# Save and close
pp.savefig(bbox_inches='tight')
plt.close()
def qa_fiber_arcrms(all_soln, pp, figsz=None):
""" Show the RMS of the wavelength solutions vs. fiber
Args:
all_soln: Wavelength solutions
pp: PDF file pointer
figsz: figure size, optional
"""
#
if figsz is None:
figsz = glbl_figsz
    # Calculate RMS of each wavelength solution
nfiber = len(all_soln)
rms = [id_dict['rms'] for id_dict in all_soln]
# Plot
plt.figure(figsize=figsz)
plt.scatter(np.arange(nfiber), np.array(rms))
# Label
plt.xlabel('Fiber', fontsize=17.)
plt.ylabel('RMS (pixels)', fontsize=17.)
# Save and close
pp.savefig(bbox_inches='tight')
plt.close()
def qa_fiber_dlamb(all_spec, all_soln, pp, figsz=None):
""" Show the Dlamb of the wavelength solutions vs. fiber
    Args:
        all_spec: Arc 1D fiber spectra
        all_soln: Wavelength solutions
pp: PDF file pointer
figsz: figure size, optional
"""
#
if figsz is None:
figsz = glbl_figsz
    # Calculate median dispersion per fiber
nfiber = len(all_soln)
npix = all_spec.shape[0]
xval = np.arange(npix)
dlamb = []
for ii in range(nfiber):
idict = all_soln[ii]
wave = dufits.func_val(xval,idict['final_fit_pix'])
dlamb.append(np.median(np.abs(wave-np.roll(wave,1))))
# Plot
plt.figure(figsize=figsz)
plt.scatter(np.arange(nfiber), np.array(dlamb))
# Label
plt.xlabel('Fiber', fontsize=17.)
plt.ylabel(r'$\Delta \lambda$ (Ang)', fontsize=17.)
# Save and close
pp.savefig(bbox_inches='tight')
plt.close()
def qa_fiber_trace(flat, xtrc, outfil=None, Nfiber=25, isclmin=0.5):
''' Generate a QA plot for the fiber traces
Parameters
----------
flat: ndarray
image
xtrc: ndarray
Trace array
isclmin: float, optional [0.5]
Fraction of 90 percentile flux to scale image by
outfil: str, optional
Output file
'''
ticks_font = matplotlib.font_manager.FontProperties(family='times new roman',
style='normal', size=16, weight='normal', stretch='normal')
plt.rcParams['font.family']= 'times new roman'
cmm = cm.Greys_r
# Outfil
if outfil is None:
outfil = 'fiber_trace_qa.pdf'
ntrc = xtrc.shape[1]
ycen = np.arange(flat.shape[0])
# Plot
pp = PdfPages(outfil)
plt.clf()
fig = plt.figure(figsize=(8, 5.0),dpi=1200)
#fig.set_size_inches(10.0,6.5)
Nbundle = ntrc // Nfiber + (ntrc%Nfiber > 0)
for qq in range(Nbundle):
ax = plt.gca()
for label in ax.get_yticklabels() :
label.set_fontproperties(ticks_font)
for label in ax.get_xticklabels() :
label.set_fontproperties(ticks_font)
# Cut image
i0 = qq*Nfiber
i1 = np.minimum((qq+1)*Nfiber,ntrc)
x0 = np.maximum(int(np.min(xtrc[:,i0:i1]))-3,0)
x1 = np.minimum(int(np.max(xtrc[:,i0:i1]))+3,flat.shape[1])
sub_flat = flat[:,x0:x1].T
# Scale
srt = np.sort(sub_flat.flatten())
sclmax = srt[int(sub_flat.size*0.9)]
sclmin = isclmin * sclmax
# Plot
mplt = plt.imshow(sub_flat,origin='lower', cmap=cmm,
extent=(0., sub_flat.shape[1]-1, x0,x1-1), aspect='auto')
#extent=(0., sub_flat.shape[1]-1, x0,x1))
#mplt.set_clim(vmin=sclmin, vmax=sclmax)
# Axes
#plt.xlim(0., sub_flat.shape[1]-1)
plt.xlim(0., sub_flat.shape[1]-1)
plt.ylim(x0,x1)
# Traces
for ii in range(i0,i1):
# Left
plt.plot(ycen, xtrc[:,ii], 'r-',alpha=0.7, linewidth=0.5)
# Label
#iy = int(frame.shape[0]/2.)
#plt.text(ltrace[iy,ii], ycen[iy], '{:d}'.format(ii+1), color='red', ha='center')
#plt.text(rtrace[iy,ii], ycen[iy], '{:d}'.format(ii+1), color='green', ha='center')
pp.savefig(bbox_inches='tight')
plt.close()
# Finish
print('Writing {:s} QA for fiber trace'.format(outfil))
pp.close()
| desihub/desispec | py/desispec/bootcalib.py | Python | bsd-3-clause | 66,424 | ["Gaussian"] | e34fdeb202af140b636b92d0375bb3960ca6c0b284cd095edc4362a14d8b4ca8 |
""" DIRAC FileCatalog mix-in class to manage directory metadata
"""
# pylint: disable=protected-access
__RCSID__ = "$Id$"
import os
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.Time import queryTime
class DirectoryMetadata:
def __init__(self, database=None):
self.db = database
def setDatabase(self, database):
self.db = database
##############################################################################
#
# Manage Metadata fields
#
def addMetadataField(self, pname, ptype, credDict):
""" Add a new metadata parameter to the Metadata Database.
pname - parameter name, ptype - parameter type in the MySQL notation
"""
result = self.db.fmeta.getFileMetadataFields(credDict)
if not result['OK']:
return result
if pname in result['Value'].keys():
return S_ERROR('The metadata %s is already defined for Files' % pname)
result = self.getMetadataFields(credDict)
if not result['OK']:
return result
if pname in result['Value'].keys():
if ptype.lower() == result['Value'][pname].lower():
return S_OK('Already exists')
return S_ERROR('Attempt to add an existing metadata with different type: %s/%s' %
(ptype, result['Value'][pname]))
valueType = ptype
if ptype.lower()[:3] == 'int':
valueType = 'INT'
elif ptype.lower() == 'string':
valueType = 'VARCHAR(128)'
elif ptype.lower() == 'float':
valueType = 'FLOAT'
elif ptype.lower() == 'date':
valueType = 'DATETIME'
elif ptype == "MetaSet":
valueType = "VARCHAR(64)"
req = "CREATE TABLE FC_Meta_%s ( DirID INTEGER NOT NULL, Value %s, PRIMARY KEY (DirID), INDEX (Value) )" \
% (pname, valueType)
result = self.db._query(req)
if not result['OK']:
return result
result = self.db.insertFields('FC_MetaFields', ['MetaName', 'MetaType'], [pname, ptype])
if not result['OK']:
return result
metadataID = result['lastRowId']
result = self.__transformMetaParameterToData(pname)
if not result['OK']:
return result
return S_OK("Added new metadata: %d" % metadataID)
def deleteMetadataField(self, pname, credDict):
""" Remove metadata field
"""
req = "DROP TABLE FC_Meta_%s" % pname
result = self.db._update(req)
error = ''
if not result['OK']:
error = result["Message"]
req = "DELETE FROM FC_MetaFields WHERE MetaName='%s'" % pname
result = self.db._update(req)
if not result['OK']:
if error:
result["Message"] = error + "; " + result["Message"]
return result
def getMetadataFields(self, credDict):
""" Get all the defined metadata fields
"""
req = "SELECT MetaName,MetaType FROM FC_MetaFields"
result = self.db._query(req)
if not result['OK']:
return result
metaDict = {}
for row in result['Value']:
metaDict[row[0]] = row[1]
return S_OK(metaDict)
def addMetadataSet(self, metaSetName, metaSetDict, credDict):
""" Add a new metadata set with the contents from metaSetDict
"""
result = self.getMetadataFields(credDict)
if not result['OK']:
return result
metaTypeDict = result['Value']
# Check the sanity of the metadata set contents
for key in metaSetDict:
if key not in metaTypeDict:
return S_ERROR('Unknown key %s' % key)
result = self.db.insertFields('FC_MetaSetNames', ['MetaSetName'], [metaSetName])
if not result['OK']:
return result
metaSetID = result['lastRowId']
req = "INSERT INTO FC_MetaSets (MetaSetID,MetaKey,MetaValue) VALUES %s"
vList = []
for key, value in metaSetDict.items():
vList.append("(%d,'%s','%s')" % (metaSetID, key, str(value)))
vString = ','.join(vList)
result = self.db._update(req % vString)
return result
def getMetadataSet(self, metaSetName, expandFlag, credDict):
""" Get fully expanded contents of the metadata set
"""
result = self.getMetadataFields(credDict)
if not result['OK']:
return result
metaTypeDict = result['Value']
req = "SELECT S.MetaKey,S.MetaValue FROM FC_MetaSets as S, FC_MetaSetNames as N "
req += "WHERE N.MetaSetName='%s' AND N.MetaSetID=S.MetaSetID" % metaSetName
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_OK({})
resultDict = {}
for key, value in result['Value']:
if key not in metaTypeDict:
return S_ERROR('Unknown key %s' % key)
if expandFlag:
if metaTypeDict[key] == "MetaSet":
result = self.getMetadataSet(value, expandFlag, credDict)
if not result['OK']:
return result
resultDict.update(result['Value'])
else:
resultDict[key] = value
else:
resultDict[key] = value
return S_OK(resultDict)
#############################################################################################
#
# Set and get directory metadata
#
#############################################################################################
def setMetadata(self, dpath, metadict, credDict):
""" Set the value of a given metadata field for the the given directory path
"""
result = self.getMetadataFields(credDict)
if not result['OK']:
return result
metaFields = result['Value']
result = self.db.dtree.findDir(dpath)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Path not found: %s' % dpath)
dirID = result['Value']
dirmeta = self.getDirectoryMetadata(dpath, credDict, owndata=False)
if not dirmeta['OK']:
return dirmeta
for metaName, metaValue in metadict.items():
if metaName not in metaFields:
result = self.setMetaParameter(dpath, metaName, metaValue, credDict)
if not result['OK']:
return result
continue
# Check that the metadata is not defined for the parent directories
if metaName in dirmeta['Value']:
return S_ERROR('Metadata conflict detected for %s for directory %s' % (metaName, dpath))
result = self.db.insertFields('FC_Meta_%s' % metaName, ['DirID', 'Value'], [dirID, metaValue])
if not result['OK']:
if result['Message'].find('Duplicate') != -1:
req = "UPDATE FC_Meta_%s SET Value='%s' WHERE DirID=%d" % (metaName, metaValue, dirID)
result = self.db._update(req)
if not result['OK']:
return result
else:
return result
return S_OK()
def removeMetadata(self, dpath, metadata, credDict):
""" Remove the specified metadata for the given directory
"""
result = self.getMetadataFields(credDict)
if not result['OK']:
return result
metaFields = result['Value']
result = self.db.dtree.findDir(dpath)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Path not found: %s' % dpath)
dirID = result['Value']
failedMeta = {}
for meta in metadata:
if meta in metaFields:
# Indexed meta case
req = "DELETE FROM FC_Meta_%s WHERE DirID=%d" % (meta, dirID)
result = self.db._update(req)
if not result['OK']:
          failedMeta[meta] = result['Message']
else:
# Meta parameter case
req = "DELETE FROM FC_DirMeta WHERE MetaKey='%s' AND DirID=%d" % (meta, dirID)
result = self.db._update(req)
if not result['OK']:
          failedMeta[meta] = result['Message']
    if failedMeta:
      metaExample = failedMeta.keys()[0]
      result = S_ERROR('Failed to remove %d metadata, e.g. %s' % (len(failedMeta), failedMeta[metaExample]))
      result['FailedMetadata'] = failedMeta
      return result
    else:
      return S_OK()
def setMetaParameter(self, dpath, metaName, metaValue, credDict):
""" Set an meta parameter - metadata which is not used in the the data
search operations
"""
result = self.db.dtree.findDir(dpath)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Path not found: %s' % dpath)
dirID = result['Value']
result = self.db.insertFields('FC_DirMeta',
['DirID', 'MetaKey', 'MetaValue'],
[dirID, metaName, str(metaValue)])
return result
def getDirectoryMetaParameters(self, dpath, credDict, inherited=True, owndata=True):
""" Get meta parameters for the given directory
"""
if inherited:
result = self.db.dtree.getPathIDs(dpath)
if not result['OK']:
return result
pathIDs = result['Value']
dirID = pathIDs[-1]
else:
result = self.db.dtree.findDir(dpath)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Path not found: %s' % dpath)
dirID = result['Value']
pathIDs = [dirID]
if len(pathIDs) > 1:
pathString = ','.join([str(x) for x in pathIDs])
req = "SELECT DirID,MetaKey,MetaValue from FC_DirMeta where DirID in (%s)" % pathString
else:
req = "SELECT DirID,MetaKey,MetaValue from FC_DirMeta where DirID=%d " % dirID
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_OK({})
metaDict = {}
for _dID, key, value in result['Value']:
if key in metaDict:
if isinstance(metaDict[key], list):
metaDict[key].append(value)
else:
          metaDict[key] = [metaDict[key], value]
else:
metaDict[key] = value
return S_OK(metaDict)
def getDirectoryMetadata(self, path, credDict, inherited=True, owndata=True):
""" Get metadata for the given directory aggregating metadata for the directory itself
and for all the parent directories if inherited flag is True. Get also the non-indexed
metadata parameters.
"""
result = self.db.dtree.getPathIDs(path)
if not result['OK']:
return result
pathIDs = result['Value']
result = self.getMetadataFields(credDict)
if not result['OK']:
return result
metaFields = result['Value']
metaDict = {}
metaOwnerDict = {}
metaTypeDict = {}
dirID = pathIDs[-1]
if not inherited:
pathIDs = pathIDs[-1:]
if not owndata:
pathIDs = pathIDs[:-1]
pathString = ','.join([str(x) for x in pathIDs])
for meta in metaFields:
req = "SELECT Value,DirID FROM FC_Meta_%s WHERE DirID in (%s)" % (meta, pathString)
result = self.db._query(req)
if not result['OK']:
return result
if len(result['Value']) > 1:
return S_ERROR('Metadata conflict for %s for directory %s' % (meta, path))
if result['Value']:
metaDict[meta] = result['Value'][0][0]
if int(result['Value'][0][1]) == dirID:
metaOwnerDict[meta] = 'OwnMetadata'
else:
metaOwnerDict[meta] = 'ParentMetadata'
metaTypeDict[meta] = metaFields[meta]
# Get also non-searchable data
result = self.getDirectoryMetaParameters(path, credDict, inherited, owndata)
if result['OK']:
metaDict.update(result['Value'])
for meta in result['Value']:
metaOwnerDict[meta] = 'OwnParameter'
result = S_OK(metaDict)
result['MetadataOwner'] = metaOwnerDict
result['MetadataType'] = metaTypeDict
return result
def __transformMetaParameterToData(self, metaname):
""" Relocate the meta parameters of all the directories to the corresponding
indexed metadata table
"""
req = "SELECT DirID,MetaValue from FC_DirMeta WHERE MetaKey='%s'" % metaname
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_OK()
dirDict = {}
for dirID, meta in result['Value']:
dirDict[dirID] = meta
    dirList = dirDict.keys()
    # Exclude child directories from the list
    # (iterate over a copy, since entries are removed from dirList below)
    for dirID in list(dirList):
result = self.db.dtree.getSubdirectoriesByID(dirID)
if not result['OK']:
return result
if not result['Value']:
continue
childIDs = result['Value'].keys()
for childID in childIDs:
if childID in dirList:
del dirList[dirList.index(childID)]
insertValueList = []
for dirID in dirList:
insertValueList.append("( %d,'%s' )" % (dirID, dirDict[dirID]))
req = "INSERT INTO FC_Meta_%s (DirID,Value) VALUES %s" % (metaname, ', '.join(insertValueList))
result = self.db._update(req)
if not result['OK']:
return result
req = "DELETE FROM FC_DirMeta WHERE MetaKey='%s'" % metaname
result = self.db._update(req)
return result
############################################################################################
#
# Find directories corresponding to the metadata
#
def __createMetaSelection(self, meta, value, table=''):
if isinstance(value, dict):
selectList = []
for operation, operand in value.items():
if operation in ['>', '<', '>=', '<=']:
if isinstance(operand, list):
return S_ERROR('Illegal query: list of values for comparison operation')
if isinstance(operand, (int, long)):
selectList.append("%sValue%s%d" % (table, operation, operand))
elif isinstance(operand, float):
selectList.append("%sValue%s%f" % (table, operation, operand))
else:
selectList.append("%sValue%s'%s'" % (table, operation, operand))
elif operation == 'in' or operation == "=":
if isinstance(operand, list):
vString = ','.join(["'" + str(x) + "'" for x in operand])
selectList.append("%sValue IN (%s)" % (table, vString))
else:
selectList.append("%sValue='%s'" % (table, operand))
elif operation == 'nin' or operation == "!=":
if isinstance(operand, list):
vString = ','.join(["'" + str(x) + "'" for x in operand])
selectList.append("%sValue NOT IN (%s)" % (table, vString))
else:
selectList.append("%sValue!='%s'" % (table, operand))
selectString = ' AND '.join(selectList)
elif isinstance(value, list):
vString = ','.join(["'" + str(x) + "'" for x in value])
selectString = "%sValue in (%s)" % (table, vString)
else:
if value == "Any":
selectString = ''
else:
selectString = "%sValue='%s' " % (table, value)
return S_OK(selectString)
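  # Hedged illustration (not part of the original class): with
  # value = {'>': 10, 'in': ['a', 'b']} and table = 'M.', the method above
  # produces the clauses "M.Value>10" and "M.Value IN ('a','b')" joined by
  # ' AND ' (clause order follows Python 2 dict iteration); value = 'Any'
  # yields an empty selection string, i.e. no constraint at all.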
def __findSubdirByMeta(self, meta, value, pathSelection='', subdirFlag=True):
""" Find directories for the given meta datum. If the the meta datum type is a list,
combine values in OR. In case the meta datum is 'Any', finds all the subdirectories
for which the meta datum is defined at all.
"""
result = self.__createMetaSelection(meta, value, "M.")
if not result['OK']:
return result
selectString = result['Value']
req = " SELECT M.DirID FROM FC_Meta_%s AS M" % meta
if pathSelection:
req += " JOIN ( %s ) AS P WHERE M.DirID=P.DirID" % pathSelection
if selectString:
if pathSelection:
req += " AND %s" % selectString
else:
req += " WHERE %s" % selectString
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_OK([])
dirList = []
for row in result['Value']:
dirID = row[0]
dirList.append(dirID)
# if subdirFlag:
# result = self.db.dtree.getSubdirectoriesByID( dirID )
# if not result['OK']:
# return result
# dirList += result['Value']
if subdirFlag:
result = self.db.dtree.getAllSubdirectoriesByID(dirList)
if not result['OK']:
return result
dirList += result['Value']
return S_OK(dirList)
def __findSubdirMissingMeta(self, meta, pathSelection):
""" Find directories not having the given meta datum defined
"""
result = self.__findSubdirByMeta(meta, 'Any', pathSelection)
if not result['OK']:
return result
dirList = result['Value']
table = self.db.dtree.getTreeTable()
dirString = ','.join([str(x) for x in dirList])
if dirList:
req = 'SELECT DirID FROM %s WHERE DirID NOT IN ( %s )' % (table, dirString)
else:
req = 'SELECT DirID FROM %s' % table
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_OK([])
dirList = [x[0] for x in result['Value']]
return S_OK(dirList)
def __expandMetaDictionary(self, metaDict, credDict):
""" Expand the dictionary with metadata query
"""
result = self.getMetadataFields(credDict)
if not result['OK']:
return result
metaTypeDict = result['Value']
resultDict = {}
extraDict = {}
for key, value in metaDict.items():
if key not in metaTypeDict:
# return S_ERROR( 'Unknown metadata field %s' % key )
extraDict[key] = value
continue
keyType = metaTypeDict[key]
if keyType != "MetaSet":
resultDict[key] = value
else:
result = self.getMetadataSet(value, True, credDict)
if not result['OK']:
return result
mDict = result['Value']
for mk, mv in mDict.items():
if mk in resultDict:
return S_ERROR('Contradictory query for key %s' % mk)
else:
resultDict[mk] = mv
result = S_OK(resultDict)
result['ExtraMetadata'] = extraDict
return result
def __checkDirsForMetadata(self, meta, value, pathString):
""" Check if any of the given directories conform to the given metadata
"""
result = self.__createMetaSelection(meta, value, "M.")
if not result['OK']:
return result
selectString = result['Value']
if selectString:
req = "SELECT M.DirID FROM FC_Meta_%s AS M WHERE %s AND M.DirID IN (%s)" % (meta, selectString, pathString)
else:
req = "SELECT M.DirID FROM FC_Meta_%s AS M WHERE M.DirID IN (%s)" % (meta, pathString)
result = self.db._query(req)
if not result['OK']:
return result
elif not result['Value']:
return S_OK(None)
elif len(result['Value']) > 1:
return S_ERROR('Conflict in the directory metadata hierarchy')
else:
return S_OK(result['Value'][0][0])
@queryTime
def findDirIDsByMetadata(self, queryDict, path, credDict):
""" Find Directories satisfying the given metadata and being subdirectories of
the given path
"""
pathDirList = []
pathDirID = 0
pathString = '0'
if path != '/':
result = self.db.dtree.getPathIDs(path)
if not result['OK']:
# as result[Value] is already checked in getPathIDs
return result
pathIDs = result['Value']
pathDirID = pathIDs[-1]
pathString = ','.join([str(x) for x in pathIDs])
result = self.__expandMetaDictionary(queryDict, credDict)
if not result['OK']:
return result
metaDict = result['Value']
# Now check the meta data for the requested directory and its parents
finalMetaDict = dict(metaDict)
for meta in metaDict.keys():
result = self.__checkDirsForMetadata(meta, metaDict[meta], pathString)
if not result['OK']:
return result
elif result['Value'] is not None:
# Some directory in the parent hierarchy is already conforming with the
# given metadata, no need to check it further
del finalMetaDict[meta]
if finalMetaDict:
pathSelection = ''
if pathDirID:
result = self.db.dtree.getSubdirectoriesByID(pathDirID, includeParent=True, requestString=True)
if not result['OK']:
return result
pathSelection = result['Value']
dirList = []
first = True
for meta, value in finalMetaDict.items():
if value == "Missing":
result = self.__findSubdirMissingMeta(meta, pathSelection)
else:
result = self.__findSubdirByMeta(meta, value, pathSelection)
if not result['OK']:
return result
mList = result['Value']
if first:
dirList = mList
first = False
else:
newList = []
for d in dirList:
if d in mList:
newList.append(d)
dirList = newList
else:
if pathDirID:
result = self.db.dtree.getSubdirectoriesByID(pathDirID, includeParent=True)
if not result['OK']:
return result
pathDirList = result['Value'].keys()
finalList = []
dirSelect = False
if finalMetaDict:
dirSelect = True
finalList = dirList
if pathDirList:
finalList = list(set(dirList) & set(pathDirList))
else:
if pathDirList:
dirSelect = True
finalList = pathDirList
result = S_OK(finalList)
if finalList:
result['Selection'] = 'Done'
elif dirSelect:
result['Selection'] = 'None'
else:
result['Selection'] = 'All'
return result
@queryTime
def findDirectoriesByMetadata(self, queryDict, path, credDict):
""" Find Directory names satisfying the given metadata and being subdirectories of
the given path
"""
result = self.findDirIDsByMetadata(queryDict, path, credDict)
if not result['OK']:
return result
dirIDList = result['Value']
dirNameDict = {}
if dirIDList:
result = self.db.dtree.getDirectoryPaths(dirIDList)
if not result['OK']:
return result
dirNameDict = result['Value']
elif result['Selection'] == 'None':
dirNameDict = {0: "None"}
elif result['Selection'] == 'All':
dirNameDict = {0: "All"}
return S_OK(dirNameDict)
def findFilesByMetadata(self, metaDict, path, credDict):
""" Find Files satisfying the given metadata
"""
result = self.findDirectoriesByMetadata(metaDict, path, credDict)
if not result['OK']:
return result
dirDict = result['Value']
dirList = dirDict.keys()
fileList = []
result = self.db.dtree.getFilesInDirectory(dirList, credDict)
if not result['OK']:
return result
for _fileID, dirID, fname in result['Value']:
fileList.append(dirDict[dirID] + '/' + os.path.basename(fname))
return S_OK(fileList)
def findFileIDsByMetadata(self, metaDict, path, credDict, startItem=0, maxItems=25):
""" Find Files satisfying the given metadata
"""
result = self.findDirIDsByMetadata(metaDict, path, credDict)
if not result['OK']:
return result
dirList = result['Value']
return self.db.dtree.getFileIDsInDirectoryWithLimits(dirList, credDict, startItem, maxItems)
################################################################################################
#
# Find metadata compatible with other metadata in order to organize dynamically updated metadata selectors
def __findCompatibleDirectories(self, meta, value, fromDirs):
""" Find directories compatible with the given meta datum.
Optionally limit the list of compatible directories to only those in the
fromDirs list
"""
# The directories compatible with the given meta datum are:
# - directory for which the datum is defined
# - all the subdirectories of the above directory
# - all the directories in the parent hierarchy of the above directory
# Find directories defining the meta datum and their subdirectories
result = self.__findSubdirByMeta(meta, value, subdirFlag=False)
if not result['OK']:
return result
selectedDirs = result['Value']
if not selectedDirs:
return S_OK([])
result = self.db.dtree.getAllSubdirectoriesByID(selectedDirs)
if not result['OK']:
return result
subDirs = result['Value']
# Find parent directories of the directories defining the meta datum
parentDirs = []
for psub in selectedDirs:
result = self.db.dtree.getPathIDsByID(psub)
if not result['OK']:
return result
parentDirs += result['Value']
# Constrain the output to only those that are present in the input list
resDirs = parentDirs + subDirs + selectedDirs
if fromDirs:
resDirs = list(set(resDirs) & set(fromDirs))
return S_OK(resDirs)
def __findDistinctMetadata(self, metaList, dList):
""" Find distinct metadata values defined for the list of the input directories.
Limit the search for only metadata in the input list
"""
if dList:
dString = ','.join([str(x) for x in dList])
else:
dString = None
metaDict = {}
for meta in metaList:
req = "SELECT DISTINCT(Value) FROM FC_Meta_%s" % meta
if dString:
req += " WHERE DirID in (%s)" % dString
result = self.db._query(req)
if not result['OK']:
return result
if result['Value']:
metaDict[meta] = []
for row in result['Value']:
metaDict[meta].append(row[0])
return S_OK(metaDict)
def getCompatibleMetadata(self, queryDict, path, credDict):
""" Get distinct metadata values compatible with the given already defined metadata
"""
pathDirID = 0
if path != '/':
result = self.db.dtree.findDir(path)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Path not found: %s' % path)
pathDirID = int(result['Value'])
pathDirs = []
if pathDirID:
result = self.db.dtree.getSubdirectoriesByID(pathDirID, includeParent=True)
if not result['OK']:
return result
if result['Value']:
pathDirs = result['Value'].keys()
result = self.db.dtree.getPathIDsByID(pathDirID)
if not result['OK']:
return result
if result['Value']:
pathDirs += result['Value']
# Get the list of metadata fields to inspect
result = self.getMetadataFields(credDict)
if not result['OK']:
return result
metaFields = result['Value']
comFields = metaFields.keys()
# Commented out to return compatible data also for selection metadata
# for m in metaDict:
# if m in comFields:
# del comFields[comFields.index( m )]
result = self.__expandMetaDictionary(queryDict, credDict)
if not result['OK']:
return result
metaDict = result['Value']
fromList = pathDirs
anyMeta = True
if metaDict:
anyMeta = False
for meta, value in metaDict.items():
result = self.__findCompatibleDirectories(meta, value, fromList)
if not result['OK']:
return result
cdirList = result['Value']
if cdirList:
fromList = cdirList
else:
fromList = []
break
if anyMeta or fromList:
result = self.__findDistinctMetadata(comFields, fromList)
else:
result = S_OK({})
return result
def removeMetadataForDirectory(self, dirList, credDict):
""" Remove all the metadata for the given directory list
"""
if not dirList:
return S_OK({'Successful': {}, 'Failed': {}})
failed = {}
successful = {}
dirs = dirList
if not isinstance(dirList, list):
dirs = [dirList]
dirListString = ','.join([str(d) for d in dirs])
# Get the list of metadata fields to inspect
result = self.getMetadataFields(credDict)
if not result['OK']:
return result
metaFields = result['Value']
for meta in metaFields:
req = "DELETE FROM FC_Meta_%s WHERE DirID in ( %s )" % (meta, dirListString)
result = self.db._query(req)
if not result['OK']:
failed[meta] = result['Message']
else:
successful[meta] = 'OK'
return S_OK({'Successful': successful, 'Failed': failed})
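  # Hedged usage sketch (illustrative only; 'db', the metadata names, and the
  # path below are assumptions, not part of this module):
  #   meta = DirectoryMetadata(database=db)
  #   meta.findDirectoriesByMetadata({'Campaign': 'Run2', 'Energy': {'>': 100}},
  #                                  '/some/path', credDict)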
| fstagni/DIRAC | DataManagementSystem/DB/FileCatalogComponents/DirectoryMetadata.py | Python | gpl-3.0 | 27,739 | ["DIRAC"] | 55d7c7ee2466c5b8250e1064144fac49cfadd927f8c8447d7e03ec73c114af24 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from sympy import *
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.stats import binom
mpl.rcParams['pdf.fonttype'] = 42
plt.ion()
plt.close('all')
"""
Analyze L4-L2 pooling capacity
"""
def calculateNumCellsVsK(kVal, nVal, wVal, mVal):
# number of columns
n = Symbol("n", positive=True)
# number of cells
m = Symbol("m", positive=True)
# number of connections per pattern
w = Symbol("w", positive=True)
# number of (feature, location) pairs
k = Symbol("k", positive=True)
numCellsInUnion = n * m * (1 - pow(1 - w / (n * m), k))
numCellsInUnionVal = numCellsInUnion.subs(n, nVal).subs(w, wVal).subs(
k, kVal).subs(m, mVal).evalf()
return numCellsInUnionVal
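# Hedged numeric check (illustrative only): each of the n*m cells escapes a
# single pattern with probability 1 - w/(n*m), so the expected union size is
# n*m*(1 - (1 - w/(n*m))**k).
def _checkNumCellsVsK(kVal=100, nVal=2048, wVal=40, mVal=10):
  """Compare the sympy expression against a direct float evaluation."""
  direct = nVal * mVal * (1. - (1. - float(wVal) / (nVal * mVal)) ** kVal)
  symbolic = float(calculateNumCellsVsK(kVal, nVal, wVal, mVal))
  return direct, symbolic  # both about 3.6e3 for the defaults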
def calculateSDRFalseMatchError(kVal,
thetaVal=20,
nVal=2048,
wVal=40,
mVal=10,
cVal=5):
numCellsInUnionVal = calculateNumCellsVsK(kVal, nVal, cVal, mVal)
pMatchBit = float(numCellsInUnionVal)/ (nVal * mVal)
pFalseMatch = 1 - binom.cdf(thetaVal, wVal, pMatchBit)
return pFalseMatch
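# Hedged usage sketch (illustrative only): the false-match probability of a
# random L4 SDR against one L2 cell is the binomial tail
# 1 - Binom.cdf(theta, w, p), with p the fraction of L4 cells the L2 cell is
# connected to; the parameter values below are assumptions.
def _demoSDRFalseMatchError():
  """Evaluate the single-cell false-match error for one parameter set."""
  return calculateSDRFalseMatchError(kVal=100, thetaVal=20, nVal=2048,
                                     wVal=40, mVal=10, cVal=5)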
def calculateObjectFalseMatchError(kVal, thetaVal=20, nVal=2048, wVal=40, mVal=10):
pFalseMatchSDR = calculateSDRFalseMatchError(kVal, thetaVal, nVal, wVal, mVal)
pFalseMatchObj = 1 - pow(1-pFalseMatchSDR, kVal)
return pFalseMatchObj
def generateL4SDR(n=2048, m=10, w=40):
""" Generate single L4 SDR, return active bits"""
colOrder = np.random.permutation(np.arange(n))
activeCols = colOrder[:w]
activeCells = np.random.randint(low=0, high=m, size=(w, ))
activeBits = activeCols * m + activeCells
return activeBits
def generateUnionSDR(k, n=2048, m=10, w=40, c=None):
""" Generate a set of L4 cells that are connected to a L2 neuron """
if c is None:
c = w
activeCells = set()
connectedCells = set()
for i in range(k):
activeBits = generateL4SDR(n, m, w)
activeBits = np.random.permutation(activeBits)
activeCells = activeCells.union(activeBits)
connectedCells = connectedCells.union(activeBits[:c])
return connectedCells, activeCells
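# Hedged note (descriptive only): each L4 SDR activates one random cell in
# each of w random columns; an L2 cell samples c of the w active cells per
# pattern, so after k patterns its connected set is the union of k such
# draws, e.g. connected, active = generateUnionSDR(k=100, n=2048, m=10,
# w=40, c=5), with len(connected) close to calculateNumCellsVsK's estimate.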
def generateMultipleUnionSDRs(numL2Cell, k, n=2048, m=10, w=40, c=None):
""" Generate numL2Cell set of L4 cells that are connected to numL2Cell
L2 cells
"""
if c is None:
c = w
activeCells = np.zeros((n * m, ))
connectedCells = []
for j in range(numL2Cell):
connectedCells.append(np.zeros((n * m, )))
for i in range(k):
activeBits = generateL4SDR(n, m, w)
activeCells[activeBits] = 1
for j in range(numL2Cell):
activeBits = np.random.permutation(activeBits)
connectedCells[j][activeBits[:c]] = 1
return connectedCells, activeCells
def simulateFalseMatchError(threshList, k, n=2048, w=40, m=10, c=10):
connectedCells, activeCells = generateUnionSDR(k, n, m, w, c)
numRpts = 10000
numSDRMatch = []
for rpt in range(numRpts):
sdr = set(generateL4SDR(n, m, w))
numSDRMatch.append(len(connectedCells.intersection(sdr)))
numSDRMatch = np.array(numSDRMatch)
falseMatchError = []
for thresh in threshList:
falseMatchError.append(
np.sum(np.greater(numSDRMatch, thresh)).astype('float32') / numRpts)
return falseMatchError
def simulateNumCellsVsK(kVal, nVal, wVal, mVal):
connectedCells, activeCells = generateUnionSDR(kVal, nVal, mVal, wVal)
numCellsInUnion = len(connectedCells)
return numCellsInUnion
def simulateL2CellPairsConditionalFalseMatch(b1, n=2048, w=40, m=10, c=10, k=100):
"""
Given an SDR that has b1 bits overlap with one L2 cell,
what is the chance that the SDR also has >theta bits overlap with
a second L2 cell for this object?
:param n: column # for L4
:param w: active cell # for L4
:param m: L4 cell # per column
:param c: connectivity per L4 column
:param k: (feature, location) # per object
:return:
"""
numRpts = 10000
overlapList = np.zeros((numRpts,))
for i in range(numRpts):
(connectedCells, activeCells) = generateMultipleUnionSDRs(
2, k, n, m, w, c)
connectedCells1 = np.where(connectedCells[0])[0]
nonConnectedCells1 = np.where(connectedCells[0]==0)[0]
selectConnectedCell1 = connectedCells1[
np.random.randint(0, len(connectedCells1), (b1, ))]
selectNonConnectedCell1 = nonConnectedCells1[
np.random.randint(0, len(nonConnectedCells1), (w-b1, ))]
overlap = 0
overlap += np.sum(connectedCells[1][selectConnectedCell1])
overlap += np.sum(connectedCells[1][selectNonConnectedCell1])
overlapList[i] = overlap
return overlapList
def simulateL2CellPairFalseMatch(theta, n=2048, w=40, m=10, c=10, k=100):
numRpts = 100000
numL2cell = 2
numMatchPair = 0
numMatch = np.zeros((numL2cell, ))
for _ in range(numRpts):
(connectedCells, activeCells) = generateMultipleUnionSDRs(
numL2cell, k, n, m, w, c)
l4SDR = generateL4SDR(n, m, w)
match = 1
for j in range(numL2cell):
if np.sum(connectedCells[j][l4SDR]) <= theta:
match = 0
else:
numMatch[j] += 1
    numMatchPair += match
  # empirical false-match rates: both cells matching vs. each cell alone
  return numMatchPair / float(numRpts), numMatch / float(numRpts)
def simulateL4L2Pooling(theta=3, n=2048, w=40, m=10, c=5, k=100):
numRpts = 1000
numL2cell = 40
calculateNumCellsVsK(k, n, c, m)
numMatch = np.zeros((numRpts, numL2cell))
for i in range(numRpts):
print i
(connectedCells, activeCells) = generateMultipleUnionSDRs(
numL2cell, k, n, m, w, c)
l4SDR = generateL4SDR(n, m, w)
for j in range(numL2cell):
if np.sum(connectedCells[j][l4SDR]) > theta:
numMatch[i, j] += 1
plt.figure()
plt.hist(np.sum(numMatch, 1))
plt.xlabel('falsely activated L2 cells #')
plt.ylabel('Frequency ')
plt.savefig('L4L2PoolingSimulation.pdf')
def computeL2CellPairsFalseMatchChance(thetaVal, nVal, mVal, wVal, kVal, cVal):
n = Symbol("n", positive=True)
m = Symbol("m", positive=True)
w = Symbol("w", positive=True)
k = Symbol("k", positive=True)
c = Symbol("c", positive=True)
b1 = Symbol("b1", positive=True)
b2 = Symbol("b2", positive=True)
numOverlap = n * m * (1 - pow(1 - (c * c) / (w * n * m), k))
numCellsInUnion = n * m * (1 - pow(1 - c / (n * m), k))
numTotal = binomial(n*m - numCellsInUnion, w-b1) * binomial(numCellsInUnion, b1)
numCellsInUnionVal = int(numCellsInUnion.subs(k, kVal).subs(c, cVal).\
subs(n, nVal).subs(m, mVal).evalf())
numOverlapVal = int(
numOverlap.subs(k, kVal).subs(c, cVal).subs(n, nVal).subs(m, mVal).subs(
w, wVal).evalf())
pFalseMatchPair = 0
pFalseMatchb1ValDict = {}
pFalseMatchb1b2Dict = {}
pFalseMatchb2Givenb1Dict = {}
for b1Val in range(thetaVal+1, wVal+1):
p = numCellsInUnion / (n * m)
pFalseMatchb1 = binomial(w, b1) * pow(p, b1) * pow(1 - p, w - b1)
pFalseMatchb1Val = pFalseMatchb1.subs(b1, b1Val).subs(n, nVal).\
subs(m, mVal).subs(k, kVal).subs(w, wVal).subs(c, cVal).evalf()
numTotalVal = numTotal.subs(n, nVal).subs(m, mVal). \
subs(k, kVal).subs(w, wVal).subs(c, cVal).subs(b1, b1Val).evalf()
pFalseMatchb1ValDict[b1Val] = pFalseMatchb1Val
for b2Val in range(thetaVal+1, wVal+1):
minI = max(max(b1Val - (numCellsInUnionVal - numOverlapVal), 0),
max(b2Val - (numCellsInUnionVal - numOverlapVal), 0),
max(b1Val + b2Val - wVal, 0))
maxI = min(b1Val, b2Val)
if minI > maxI:
continue
numMatchPair = 0
for i in range(minI, maxI+1):
numMatchPair += (binomial(numOverlapVal, i) *
binomial(numCellsInUnionVal - numOverlapVal, b2Val - i) *
binomial(numCellsInUnionVal - numOverlapVal, b1Val - i) *
binomial(nVal*mVal-2*numCellsInUnionVal + numOverlapVal,
wVal-b1Val-b2Val+i))
pFalseMatchb2Givenb1 = (numMatchPair / numTotalVal)
pFalseMatchb2Givenb1Dict[(b1Val, b2Val)] = pFalseMatchb2Givenb1
pFalseMatchb1b2Dict[(b1Val, b2Val)] = pFalseMatchb2Givenb1 * pFalseMatchb1Val
pFalseMatchPair += pFalseMatchb2Givenb1 * pFalseMatchb1Val
pFalseMatchSingleCell = np.sum(np.array(pFalseMatchb1ValDict.values()))
return pFalseMatchPair, pFalseMatchSingleCell
def computeL2CellPairsFalseMatchConditionalProb(
b1Val, b2Val, nVal, mVal, wVal, kVal, cVal):
"""
Given that an L4 SDR with b1=10 bits overlap with L2 cell 1
What is the chance that this SDR has b2=10 bits overlap with L2 cell 2?
"""
n = Symbol("n", positive=True)
m = Symbol("m", positive=True)
w = Symbol("w", positive=True)
k = Symbol("k", positive=True)
c = Symbol("c", positive=True)
b1 = Symbol("b1", positive=True)
b2 = Symbol("b2", positive=True)
numOverlap = n * m * (1 - pow(1 - (c * c) / (w * n * m), k))
numCellsInUnion = n * m * (1 - pow(1 - c / (n * m), k))
numTotal = binomial(n*m - numCellsInUnion, w-b1) * binomial(numCellsInUnion, b1)
numOverlapVal = int(numOverlap.subs(k, kVal).subs(c, cVal).subs(n, nVal).
subs(m, mVal).subs(w, wVal).evalf())
numCellsInUnionVal = int(numCellsInUnion.subs(k, kVal).subs(c, cVal).
subs(n, nVal).subs(m, mVal).evalf())
numTotalVal = numTotal.subs(b1, b1Val).subs(b2, b2Val).\
subs(n, nVal).subs(m, mVal).subs(k, kVal).subs(w, wVal).subs(c, cVal).evalf()
print "Total SDR # ", numTotalVal
minI = max(max(b1Val - (numCellsInUnionVal - numOverlapVal), 0),
max(b2Val - (numCellsInUnionVal - numOverlapVal), 0))
maxI = min(b1Val, b2Val)
numMatchPair = 0
for i in range(minI, maxI+1):
numMatchPair += (binomial(numOverlapVal, i) *
binomial(numCellsInUnionVal - numOverlapVal, b2Val - i) *
binomial(numCellsInUnionVal - numOverlapVal, b1Val - i) *
binomial(nVal * mVal - 2 * numCellsInUnionVal + numOverlapVal,
wVal - b1Val - b2Val + i))
pFalseMatchPair = numMatchPair / numTotalVal
return pFalseMatchPair
def plotFalseMatchErrorSingleCell(cValList, thetaValList,
                                  nVal=2048, wVal=40, mVal=10):
  """
  False match error for a single L2 cell
  :param cValList: list of connection counts per pattern (one curve each)
  :param thetaValList: matching thresholds, one per entry of cValList
  """
kValList = np.arange(0, 500, 10)
fig, ax = plt.subplots(2, 1)
colorList = ['r', 'm', 'g', 'b', 'c']
legendList = []
for i in range(len(cValList)):
cVal = cValList[i]
thetaVal = thetaValList[i]
legendList.append('theta={}, c={}'.format(thetaVal, cVal))
FalseMatchRateSDR = []
numConnectedCells = []
for kVal in kValList:
FalseMatchRateSDR.append(calculateSDRFalseMatchError(
kVal, thetaVal, nVal, wVal, mVal, cVal))
numCellsInUnionVal = calculateNumCellsVsK(kVal, nVal, cVal, mVal)
numConnectedCells.append(numCellsInUnionVal)
ax[0].semilogy(kValList, FalseMatchRateSDR, colorList[i])
ax[1].plot(kValList, numConnectedCells, colorList[i])
ax[0].set_xlabel('# (feature, location)')
ax[0].set_ylabel('SDR false match error')
ax[0].set_ylim([pow(10, -13), 1])
ax[0].legend(legendList, loc=4)
ax[1].set_xlabel('# (feature, location)')
ax[1].set_ylabel('# connections')
def runExperimentFalseMatchConditionalPairError():
nVal = 2048
mVal = 10
wVal = 40
kVal = 100
b1Val = 10
b2Val = 10
cValList = [15, 20, 25, 30, 35, 38, 39, 40]
pFalseMatchPair = []
for cVal in cValList:
pFalseMatchPair.append(computeL2CellPairsFalseMatchConditionalProb(
b1Val, b2Val, nVal, mVal, wVal, kVal, cVal))
print "Verify Equation with simulations"
pFalseMatchPairSimulate = []
for cVal in cValList:
b2Overlap = simulateL2CellPairsConditionalFalseMatch(b1Val, nVal, wVal, mVal, cVal,
kVal)
pFalseMatchPairSimulate.append(np.mean(b2Overlap==b2Val))
print "b1 {} b2 {} c {} prob {}".format(
b1Val, b2Val, cVal, pFalseMatchPairSimulate[-1])
fig, ax = plt.subplots(1)
ax.plot(cValList, pFalseMatchPair,'-o')
ax.plot(cValList, pFalseMatchPairSimulate, '--rx')
ax.set_ylabel("P(oj=10|oi=10)")
ax.set_xlabel("Connection # per SDR")
plt.legend(['equation', 'simulation'])
plt.savefig('ConditionalFalseMatchPairErrorVsC.pdf')
def runExperimentSingleVsPairMatchError():
nVal = 2048
mVal = 10
wVal = 40
kVal = 100
thetaValList = [3, 6, 12, 18, 24]
cValList = [5, 10, 20, 30, 40]
pFalseMatchPairSingle = []
pFalseMatchPairList = []
for i in range(len(cValList)):
pFalseMatchPair, pFalseMatchSingleCell = computeL2CellPairsFalseMatchChance(
thetaValList[i], nVal, mVal, wVal, kVal, cValList[i])
print "c={} theta={} single error {} pair error {}".format(cValList[i],
thetaValList[i],
pFalseMatchSingleCell,
pFalseMatchPair)
pFalseMatchPairSingle.append(pFalseMatchSingleCell)
pFalseMatchPairList.append(pFalseMatchPair)
fig, ax = plt.subplots(1)
ax.semilogy(cValList, pFalseMatchPairSingle, '-bo')
ax.semilogy(cValList, pFalseMatchPairList, '-go')
ax.set_ylabel('False Match Error')
ax.set_xlabel('# connections per pattern')
plt.legend(['single L2', 'L2 neuron pairs'])
plt.savefig('FalseMatchPairErrorVsC.pdf')
def runExperimentUnionSize():
nVal = 2048
mVal = 10
wVal = 40
fig, ax = plt.subplots(1, 1)
legendList = []
for cVal in [10, 20, 30, 40]:
# theoretical values
numCellsVsK = []
kValList = np.arange(1, 500, 10)
for kVal in kValList:
numCellsInUnionVal = calculateNumCellsVsK(kVal, nVal, cVal, mVal)
numCellsVsK.append(numCellsInUnionVal)
legendList.append("c={}".format(cVal))
ax.plot(kValList, numCellsVsK)
for cVal in [10, 20, 30, 40]:
# simulation values
numCellsVsKsim = []
kValListSparse = np.arange(1, 500, 100)
for kVal in kValListSparse:
numCellsInUnionValSim = simulateNumCellsVsK(kVal, nVal, cVal, mVal)
numCellsVsKsim.append(numCellsInUnionValSim)
ax.plot(kValListSparse, numCellsVsKsim, 'ko')
ax.set_xlabel("# (feature, object) pair")
ax.set_ylabel("# L4 inputs per L2 cell")
ax.legend(legendList, loc=2)
plt.savefig('UnionSizeVsK.pdf')
if __name__ == "__main__":
nVal = 2048
mVal = 10
wVal = 40
cVal = 10
# plot the number of L4 cells that are connected to L2, as a function of
# (feature, location) pairs per object
runExperimentUnionSize()
# plot the false match error for single L2 cell
plotFalseMatchErrorSingleCell(cValList=[40, 40, 40, 40], thetaValList=[5, 10, 20, 30])
plt.savefig('FalseMatchErrVsK_FixedCVaryingTheta.pdf')
plotFalseMatchErrorSingleCell(cValList=[10, 20, 30, 40], thetaValList=[10, 10, 10, 10])
plt.savefig('FalseMatchErrVsK_FixedThetaVaryingC.pdf')
plotFalseMatchErrorSingleCell(cValList=[5, 10, 20, 30, 40], thetaValList=[3, 6, 12, 18, 24])
plt.savefig('FalseMatchErrVsK_VaryingThetaandC.pdf')
# plot conditional false match error
# given that an L4 SDR with b1=10 bits overlap with L2 cell 1
# what is the chance that this SDR has b2=10 bits overlap with L2 cell 2?
runExperimentFalseMatchConditionalPairError()
# plot simultaneous false match error for a pair of L2 cells
# what is the chance that an L4 SDR falsely activate two L2 cells?
runExperimentSingleVsPairMatchError()
|
ThomasMiconi/nupic.research
|
projects/poolingCapacity/layer2_capacity.py
|
Python
|
agpl-3.0
| 16,286
|
[
"NEURON"
] |
ed4c5453be6ec44b3f15549d39481332e1e5554c1ff54b160a8cd9c9c919c9ea
|
"""helper.py
Helper functions.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2015, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import __future__
import ast
import re
import math
import moose
# Bring imports from math to global namespace so that eval can use them.
from math import *
from config import logger_
funcs = math.__dict__.keys() + [ 'fmod', 'rand', 'rand2' ]
def to_bool(arg):
if arg.lower() in [ "0", "false", "no" ]:
return False
return True
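# e.g. to_bool("No") -> False, to_bool("1") -> True (anything not in the list above)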
def get_ids( expr ):
# The expression might also be a muParser if-then-else (ite) expression: cond ? a : b.
itePat = re.compile( r'(.+?)\?(.+?)\:(.+)' )
m = itePat.match( expr )
if m:
exprs = m.group(1, 2, 3)
else:
exprs = [ expr ]
ids = []
for expr in exprs:
try:
tree = ast.parse( expr )
except Exception as e:
logger_.warn( 'Expression not a valid python expression' )
logger_.warn( '\t Expression was %s' % expr )
logger_.warn( '\t Error during parsing %s' % e )
return []
for e in ast.walk( tree ):
if isinstance( e, ast.Name ):
if e.id not in funcs:
ids.append( e.id )
return ids
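# Example (assuming 'a' and 'b' are not names from the math module):
# get_ids('x > 0 ? a + 1 : b') -> ['x', 'a', 'b']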
##
# @brief Eval the expression using python. The __future__ division compile
# flag makes sure that 1/3 is reduced to 0.33333... instead of 0.
#
# @param expr
#
# @return Reduced value as string whenever possible.
def reduce_expr( expr ):
isReduced = 'false'
try:
val = eval(
compile( expr , '<string>', 'eval',
__future__.division.compiler_flag )
)
isReduced = 'true'
except Exception as e:
# logger_.debug( 'Failed to reduce %s' % expr )
# logger_.debug( '\tError was %s' % e )
val = expr
return str(val), isReduced
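# Example: reduce_expr('1/3') -> ('0.333...', 'true'), while an expression with
# free names, e.g. reduce_expr('x + 1'), comes back unchanged as ('x + 1', 'false').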
def to_float( expr ):
val, isReduced = reduce_expr( expr )
if isReduced == 'true':  # reduce_expr returns 'true'/'false' as strings
return float(val)
else:
raise RuntimeError( 'Failed to convert to float : %s' % expr )
def compt_info( compt ):
"""Get the compartment info as string"""
info = ''
if isinstance( compt, moose.CubeMesh ):
info += 'Cube\n'
info += '\tx0, y0, z0 : %s, %s, %s\n' % (compt.x0, compt.y0, compt.z0)
info += '\tx1, y1, z1 : %s, %s, %s\n' % (compt.x1, compt.y1, compt.z1)
info += '\tvolume : %s' % compt.volume
elif isinstance( compt, moose.CylMesh ):
info += 'Cylinder:\n'
info += '\tr0, r1 : %s, %s\n' % (compt.r0, compt.r1 )
info += '\tx0, y0, z0 : %s, %s, %s\n' % (compt.x0, compt.y0, compt.z0 )
info += '\tx1, y1, z1 : %s, %s, %s\n' % (compt.x1, compt.y1, compt.z1 )
try:
info += '\tvolume = %s' % compt.volume
except Exception as e:
pass
else:
info += "Unknown/unsupported compartment type %s" % compt
return info
def pool_info( pool ):
info = ''
info += ' n0 = %f,' % pool.nInit
info += ' n = %s,' % pool.n
info += ' diffConst = %s,' % pool.diffConst
return info
|
dilawar/moose-chemical
|
utils/helper.py
|
Python
|
gpl-2.0
| 3,321
|
[
"MOOSE"
] |
4b4d7bbdc89f7a1464f2f6b813bed4f3271f0178b60ebd9dc1d9e7eea22140ce
|
from .GlobalData import global_data
from .Projects import all_projects
from .Apps import all_apps
from .Pods import all_pods
from .Task import Task
from .Session import Session
from .CustomizedTask import customizedTask
from .CerberusIntegration import cerberusIntegration
from .utils.SlackIntegration import slackIntegration
from concurrent.futures import ThreadPoolExecutor
import logging
import os
import time
import datetime
import sys
class TaskManager:
def __init__(self, cerberus_history_file):
self.logger = logging.getLogger('reliability')
self.time_subs = {}
self.time_subs["minute"] = 60
self.time_subs["hour"] = 3600
self.time_subs["day"] = 86400
self.time_subs["week"] = 604800
self.time_subs["month"] = 2419200
self.init_timing()
self.cwd = os.getcwd()
self.cerberus_history_file = cerberus_history_file
def init_timing(self):
def parse_time(time_string):
unit = time_string[-1:]
value = int(time_string[:-1])
if unit == "s" :
value = value
elif unit == "m":
value = value * 60
elif unit == "h":
value = value * 3600
return value
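# e.g. parse_time("30s") -> 30, parse_time("2m") -> 120, parse_time("1h") -> 3600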
time_subs = {}
time_subs = global_data.config['timeSubstitutions']
for unit in time_subs.keys():
self.time_subs[unit] = parse_time(time_subs[unit])
def init_tasks(self):
self.next_execution_time["minute"] = self.time_subs["minute"]
self.next_execution_time["hour"] = self.time_subs["hour"]
self.next_execution_time["day"] = self.time_subs["day"]
self.next_execution_time["week"] = self.time_subs["week"]
self.next_execution_time["month"] = self.time_subs["month"]
self.next_task = {}
def calculate_next_execution(self):
next_execution_time = sys.maxsize
next_execution = {}
for interval in self.next_execution_time.keys():
if self.next_execution_time[interval] < next_execution_time:
next_execution_time = self.next_execution_time[interval]
next_execution = {}
next_execution[interval] = next_execution_time
elif self.next_execution_time[interval] == next_execution_time:
next_execution[interval] = self.next_execution_time[interval]
return (next_execution, next_execution_time)
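# Example: with next_execution_time = {"minute": 60, "hour": 3600} this returns
# ({"minute": 60}, 60); intervals tied for the earliest time all appear in the dict.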
def schedule_next(self,execution_type):
self.next_execution_time[execution_type] += self.time_subs[execution_type]
def start_test(self):
all_pods.init()
all_apps.init()
all_projects.init()
task = Task(global_data.config,{'action': 'create', 'resource': 'projects','quantity': 2})
task.execute()
# task = Task(global_data.config,{'action': 'scaleUp', 'resource': 'apps'})
# task.execute()
# task = Task(global_data.config,{'action': 'scaleDown', 'resource': 'apps'})
# task.execute()
# task = Task(global_data.config,{'action': 'visit', 'resource': 'apps'})
# task.execute()
task = Task(global_data.config,{'action': 'delete', 'resource': 'projects'})
task.execute()
def check_desired_state(self):
if os.path.isfile(self.cwd + "/halt"):
slackIntegration.post_message_in_slack("Reliability test is going to halt.")
state = "halt"
self.logger.info("Halt file found, shutting down reliability.")
elif os.path.isfile(self.cwd + "/pause"):
state = "pause"
self.logger.info("Pause file found - pausing.")
else:
state = "run"
if global_data.cerberus_enable:
cerberus_status = cerberusIntegration.get_status(global_data.cerberus_api)
if cerberus_status == "False":
if global_data.cerberus_fail_action == "halt":
state = "halt"
self.logger.warning("Cerberus status is 'False'. Halt reliability test.")
elif global_data.cerberus_fail_action == "pause":
state = "pause"
self.logger.warning("Cerberus status is 'False'. Pause reliability test. Resolve cerberus failure to continue.")
elif global_data.cerberus_fail_action == "continue":
self.logger.warning("Cerberus status is 'False'. Reliability test will continue.")
else:
self.logger.warning(f"Cerberus status is False. cerberus_fail_action '{global_data.cerberus_fail_action}' is not recognized. Reliability test will continue.")
elif cerberus_status == "True":
self.logger.info("Cerberus status is 'True'.")
else:
self.logger.warning(f"Getting Cerberus status failed, response is '{cerberus_status}'.")
cerberusIntegration.save_history(global_data.cerberus_api, self.cerberus_history_file)
return state
# re-login all users to avoid expiration of the login session token in the kubeconfig. The default timeout is 1 day.
def relogin(self):
# re-login if more than 23 hours have passed since the last login
if time.time() - global_data.last_login_time > 3600*23:
self.logger.info("Re-login for all users to avoid login session token expiration")
login_args = []
for user in global_data.users:
password = global_data.users[user].password
kubeconfig = global_data.kubeconfigs[user]
login_args.append((user, password, kubeconfig))
# login concurrently
with ThreadPoolExecutor(max_workers=51) as executor:
results = executor.map(lambda t: Session().login(*t), login_args)
for result in results:
self.logger.info(result)
global_data.last_login_time = time.time()
def dump_stats(self):
status = []
status.append(f"Total projects: {str(all_projects.total_projects)}")
status.append(f"Failed apps: {str(all_apps.failed_apps)}")
status.append(f"Successful app visits: {str(global_data.app_visit_succeeded)}")
status.append(f"Failed app visits: {str(global_data.app_visit_failed)}")
status.append(f"Total builds: {str(global_data.total_build_count)}")
status.append(f"Successful customized task: {str(customizedTask.customized_task_succeeded)}")
status.append(f"Failed customized task: {str(customizedTask.customized_task_failed)}")
status = "\n".join(status)
self.logger.info("Reliability test status:\n"+ status)
slackIntegration.post_message_in_slack("Reliability test status:\n" + status)
def start(self):
self.logger.info("Task manager started in working directory: " + self.cwd + " at: " + str(datetime.datetime.now()))
self.next_execution_time = {}
self.init_tasks()
(next_execution, next_execution_time) = self.calculate_next_execution()
current_time = 0
all_pods.init()
all_apps.init()
all_projects.init()
max_projects = global_data.maxProjects
# get the projects creation concurrency
projects_create_concurrency = 0
try:
for tasks in global_data.config["tasks"].values():
for task in tasks:
if task["action"] == "create" and task["resource"] == "projects":
projects_create_concurrency = max(projects_create_concurrency, task["concurrency"])
except KeyError as e :
self.logger.warning("KeyError " + str(e))
if projects_create_concurrency != 0:
if max_projects < projects_create_concurrency:
self.logger.warning(f"maxProjects {max_projects} should be larger than the projects create concurrency {projects_create_concurrency}")
# as projects are created concurrently, the next round will not start if the left capacity is less than the concurrency
all_projects.max_projects = max_projects-max_projects%projects_create_concurrency
self.logger.info(str(all_projects.max_projects) + " is set as the max number of projects according to the concurrency "
+ str(projects_create_concurrency) + ". Original maxProjects is " + str(max_projects))
state = "run"
while state == "run" or state == "pause":
self.logger.debug("Current time: " + str(current_time) + " next execution: " + str(next_execution))
state = self.check_desired_state()
if current_time >= next_execution_time and state == "run" :
for execution_type in next_execution.keys():
if execution_type in global_data.config["tasks"]:
tasks = global_data.config["tasks"][execution_type]
for task_to_execute in tasks:
self.relogin()
task = Task(task_to_execute)
task.execute()
self.schedule_next(execution_type)
(next_execution, next_execution_time) = self.calculate_next_execution()
if state == "pause":
self.dump_stats()
self.logger.info(f"Sleep '{global_data.sleepTime}' seconds before running next task type (minute/hour/day/week/month).")
time.sleep(global_data.sleepTime)
current_time += global_data.sleepTime
self.dump_stats()
|
mffiedler/svt
|
reliability/tasks/TaskManager.py
|
Python
|
apache-2.0
| 9,751
|
[
"VisIt"
] |
31600a0ebc5fcf73e4624b2e20481ee45ea0f91f96f65974f3ad092d68c9b157
|
# This file is part of MAUS: http://micewww.pp.rl.ac.uk/projects/maus
#
# MAUS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAUS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAUS. If not, see <http://www.gnu.org/licenses/>.
"""
test for maus_cpp.polynomial_map
"""
import random
import unittest
import numpy
import maus_cpp.polynomial_map
from maus_cpp.polynomial_map import PolynomialMap
class PolynomialMapTestCase(unittest.TestCase): # pylint: disable = R0904
"""
test for maus_cpp.polynomial_map
"""
def setUp(self): # pylint: disable = C0103
"""
test setUp
"""
self.coefficients = [
[1., 2., 3.],
[4., 5., 6.],
]
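# Each row i encodes one affine component: y_i = c[i][0] + c[i][1]*x_1 + c[i][2]*x_2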
def test_init(self):
"""
test for maus_cpp.polynomial_map.PolynomialMap.__init__
"""
my_map = PolynomialMap(2, self.coefficients)
self.assertEqual(my_map.get_coefficients_as_matrix(), self.coefficients)
try:
PolynomialMap(2, "hello")
self.assertFalse(True, msg="Should have thrown")
except TypeError:
pass
try:
PolynomialMap(2, [])
self.assertFalse(True, msg="Should have thrown, no rows")
except ValueError:
pass
try:
PolynomialMap(2, [[]])
self.assertFalse(True, msg="Should have thrown, no columns")
except ValueError:
pass
try:
PolynomialMap(2, [[1.], [1., 2.]])
self.assertFalse(True, msg="Should have thrown, not rectangular")
except ValueError:
pass
try:
PolynomialMap(2, [["a"]])
self.assertFalse(True, msg="Should have thrown, wrong type")
except TypeError:
pass
PolynomialMap(1, [[1.]])
def test_evaluate(self):
"""
test for maus_cpp.polynomial_map.PolynomialMap.evaluate
"""
my_map = PolynomialMap(2, self.coefficients)
value = my_map.evaluate([-1., -2.])
self.assertEqual(value, [1.+2.*-1.+3.*-2., 4.+5.*-1.+6.*-2.])
try:
my_map.evaluate("hello")
self.assertFalse(True, msg="Should have thrown, wrong type")
except TypeError:
pass
try:
my_map.evaluate([1.])
self.assertFalse(True, msg="Should have thrown, wrong dimension")
except TypeError:
pass
try:
my_map.evaluate([1., "hello"])
self.assertFalse(True, msg="Should have thrown, wrong type")
except TypeError:
pass
def test_lsf(self):
"""
test for maus_cpp.polynomial_map.PolynomialMap.least_squares_fit
"""
my_map = PolynomialMap(2, self.coefficients)
points = [[random.uniform(-5, 5), random.uniform(-5, 5)] \
for i in range(100)]
values = [my_map.evaluate(a_point) for a_point in points]
my_map_2 = PolynomialMap.least_squares_fit(points, values, 1)
ref_coeffs = my_map.get_coefficients_as_matrix()
test_coeffs = my_map_2.get_coefficients_as_matrix()
for i in range(2):
for j in range(3):
self.assertAlmostEqual(ref_coeffs[i][j], test_coeffs[i][j])
def test_lsf_bad_input(self):
"""
test for maus_cpp.polynomial_map.PolynomialMap.least_squares_fit inputs
"""
points = [[random.uniform(-5, 5), random.uniform(-5, 5)] \
for i in range(100)]
values = [[random.uniform(-5, 5), random.uniform(-5, 5)] \
for i in range(100)]
maus_cpp.polynomial_map.PolynomialMap.least_squares_fit(points,
values,
1)
try:
PolynomialMap.least_squares_fit(points, values, -1)
self.assertFalse(True, msg="Should have thrown, bad order")
except ValueError:
pass
values = [[random.uniform(-5, 5), random.uniform(-5, 5)] \
for i in range(10)]
try:
PolynomialMap.least_squares_fit(points, values, 1)
self.assertFalse(True, msg="Should have thrown, misaligned")
except ValueError:
pass
try:
PolynomialMap.least_squares_fit(points, values, "moose")
self.assertFalse(True, msg="Should have thrown, bad type")
except TypeError:
pass
values = [[random.uniform(-5, 5)] for i in range(100)]
try:
PolynomialMap.least_squares_fit(points, values, 1)
self.assertFalse(True, msg="Should have thrown, misaligned")
except ValueError:
pass
values = [[random.uniform(-5, 5), "cheese"] for i in range(100)]
try:
PolynomialMap.least_squares_fit(points, values, 1)
self.assertFalse(True, msg="Should have thrown, not a float")
except ValueError:
pass
values = [i for i in range(100)]
try:
PolynomialMap.least_squares_fit(points, values, 1)
self.assertFalse(True, msg="Should have thrown, not a list")
except ValueError:
pass
values = "icecream"
try:
PolynomialMap.least_squares_fit(points, values, 1)
self.assertFalse(True, msg="Should have thrown, not a list")
except ValueError:
pass
@classmethod
def mice_mc(cls, tku_data):
"""
Toy MC: propagate tku_data through a fixed 4D polynomial map,
mimicking tracking from the upstream (tku) to the downstream (tkd) tracker.
"""
my_map = PolynomialMap(4, cls.coefficients_4d)
tkd_data = [my_map.evaluate(point) for point in tku_data]
return tkd_data
def test_lsf_four_dim(self):
"""
test for maus_cpp.polynomial_map.PolynomialMap.least_squares_fit with
4D input
"""
# tku_data is a list of data, each element being a list like
# [x, px, y, py]
tku_data = [None]*100
for i in range(100):
x_var = random.uniform(-100., 100.)
y_var = random.uniform(-100., 100.)
px_var = random.uniform(-100., 100.)
py_var = random.uniform(-100., 100.)
tku_data[i] = [x_var, px_var, y_var, py_var]
# this is like - we run the tracking, and extract x, px, y, py data
# as per the tku_data
tkd_data = self.mice_mc(tku_data)
# now we calculate the transfer matrix, by means of least squares fit...
fitted_map = PolynomialMap.least_squares_fit(tku_data, tkd_data, 1)
matrix = fitted_map.get_coefficients_as_matrix()
self.assertEqual(len(matrix), len(self.coefficients_4d))
for i in range(len(matrix)):
self.assertEqual(len(matrix[i]), len(self.coefficients_4d[i]))
for j in range(len(matrix[i])):
self.assertLess(abs(matrix[i][j] - self.coefficients_4d[i][j]),
1e-3)
@classmethod
def _str_matrix(cls, matrix):
"""Convert matrix into a formatted string"""
a_string = "\n"
for row in matrix:
for element in row:
a_string += str(round(element, 5)).ljust(8)+" "
a_string += "\n"
return a_string
def test_lsf_four_dim_errors(self):
"""
test for maus_cpp.polynomial_map.PolynomialMap.least_squares_fit with
4D input and errors in the point data
"""
# tku_data is a list of data, each element being a list like
# [x, px, y, py]
n_events = 4000
error_matrix = [[0. for i in range(5)] for j in range(5)]
error_matrix[1][1] = 0.1
error_matrix[2][2] = 10.
error_matrix[3][3] = 0.1
error_matrix[4][4] = 10.
error_mean = [0. for i in range(5)]
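# The 5x5 error matrix matches the 5 coefficients per output row (constant
# term plus x, px, y, py); index 0, the constant, carries no error here.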
tku_data = [None]*n_events
tku_data_err = [None]*n_events
for i in range(n_events):
tku_data[i] = \
[random.uniform(-w, w) for w in [100., 10., 100., 10.]]
err_vec = numpy.random.multivariate_normal(error_mean, error_matrix)
tku_data_err[i] = [tku_data[i][j-1]+err_vec[j] for j in range(1, 5)]
# this is like - we run the tracking, and extract x, px, y, py data
# as per the tku_data
tkd_data = self.mice_mc(tku_data)
fitted_map = PolynomialMap.least_squares_fit(
tku_data_err, tkd_data, 1, error_matrix)
matrix = fitted_map.get_coefficients_as_matrix()
#fitted_map = PolynomialMap.least_squares_fit(tku_data, tkd_data, 1)
#matrix_no_err = fitted_map.get_coefficients_as_matrix()
fitted_map = PolynomialMap.least_squares_fit(
tku_data_err, tkd_data, 1)
matrix_err = fitted_map.get_coefficients_as_matrix()
print "\nError matrix:", self._str_matrix(error_matrix)
print "\nTruth:", self._str_matrix(self.coefficients_4d)
#print "\nMatrix fitted without any errors:", \
# self._str_matrix(matrix_no_err)
print "\nMatrix with errors:", self._str_matrix(matrix_err)
print "\nMatrix with errors but then subtract off systematic:", \
self._str_matrix(matrix)
self.assertEqual(len(matrix), len(self.coefficients_4d))
for i in range(len(matrix)):
self.assertEqual(len(matrix[i]),
len(self.coefficients_4d[i]))
for j in range(len(matrix[i])):
coeff = self.coefficients_4d[i][j]
if abs(coeff) < 1e-9:
continue
self.assertLess(abs((matrix[i][j] - coeff)/coeff), 0.1)
coefficients_4d = [
[0., 1.0, 20.0, 0., 0.0,],
[0., 0.0, 1.0, 0., 0.0,],
[0., 0., 0., 1.0, 20.0,],
[0., 0., 0., 0.0, 1.0,],
]
if __name__ == "__main__":
unittest.main()
|
mice-software/maus
|
tests/py_unit/test_maus_cpp/test_maths/test_polynomial_map.py
|
Python
|
gpl-3.0
| 10,671
|
[
"MOOSE"
] |
d01a2651630457aa62ad70b2407fafa2e400fcc01442ec86791980c013c57f4c
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'RESTool/ui/design_v2.ui'
#
# Created: Fri May 29 22:07:35 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(556, 543)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setSpacing(9)
self.horizontalLayout.setMargin(3)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.MainTabWidget = QtGui.QTabWidget(self.centralwidget)
self.MainTabWidget.setObjectName(_fromUtf8("MainTabWidget"))
self.BackupRestoreMigrateTab = QtGui.QWidget()
self.BackupRestoreMigrateTab.setObjectName(_fromUtf8("BackupRestoreMigrateTab"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.BackupRestoreMigrateTab)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.MainContentLayout = QtGui.QVBoxLayout()
self.MainContentLayout.setObjectName(_fromUtf8("MainContentLayout"))
self.BrowseerPickerLayout = QtGui.QHBoxLayout()
self.BrowseerPickerLayout.setSpacing(5)
self.BrowseerPickerLayout.setObjectName(_fromUtf8("BrowseerPickerLayout"))
self.FirstBrowserLayout = QtGui.QHBoxLayout()
self.FirstBrowserLayout.setSpacing(0)
self.FirstBrowserLayout.setObjectName(_fromUtf8("FirstBrowserLayout"))
self.FirstBrowserVerticalLayout = QtGui.QVBoxLayout()
self.FirstBrowserVerticalLayout.setSpacing(0)
self.FirstBrowserVerticalLayout.setObjectName(_fromUtf8("FirstBrowserVerticalLayout"))
self.StaticFirstBrowserLabel = QtGui.QLabel(self.BackupRestoreMigrateTab)
font = QtGui.QFont()
font.setItalic(True)
self.StaticFirstBrowserLabel.setFont(font)
self.StaticFirstBrowserLabel.setAlignment(QtCore.Qt.AlignCenter)
self.StaticFirstBrowserLabel.setObjectName(_fromUtf8("StaticFirstBrowserLabel"))
self.FirstBrowserVerticalLayout.addWidget(self.StaticFirstBrowserLabel)
self.FirstBrowserNameLayout = QtGui.QHBoxLayout()
self.FirstBrowserNameLayout.setObjectName(_fromUtf8("FirstBrowserNameLayout"))
self.StaticBrowserName = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticBrowserName.setObjectName(_fromUtf8("StaticBrowserName"))
self.FirstBrowserNameLayout.addWidget(self.StaticBrowserName)
self.cboFirstBrowser = QtGui.QComboBox(self.BackupRestoreMigrateTab)
self.cboFirstBrowser.setObjectName(_fromUtf8("cboFirstBrowser"))
self.FirstBrowserNameLayout.addWidget(self.cboFirstBrowser)
self.FirstBrowserNameLayout.setStretch(0, 40)
self.FirstBrowserNameLayout.setStretch(1, 60)
self.FirstBrowserVerticalLayout.addLayout(self.FirstBrowserNameLayout)
self.FirstBrowserProfileLayout = QtGui.QHBoxLayout()
self.FirstBrowserProfileLayout.setObjectName(_fromUtf8("FirstBrowserProfileLayout"))
self.StaticProfileName = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticProfileName.setObjectName(_fromUtf8("StaticProfileName"))
self.FirstBrowserProfileLayout.addWidget(self.StaticProfileName)
self.cboFirstBrowserProfile = QtGui.QComboBox(self.BackupRestoreMigrateTab)
self.cboFirstBrowserProfile.setObjectName(_fromUtf8("cboFirstBrowserProfile"))
self.FirstBrowserProfileLayout.addWidget(self.cboFirstBrowserProfile)
self.FirstBrowserProfileLayout.setStretch(0, 40)
self.FirstBrowserProfileLayout.setStretch(1, 60)
self.FirstBrowserVerticalLayout.addLayout(self.FirstBrowserProfileLayout)
self.FirstBrowserDBFoundLayout = QtGui.QHBoxLayout()
self.FirstBrowserDBFoundLayout.setObjectName(_fromUtf8("FirstBrowserDBFoundLayout"))
self.StaticRESInfo = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticRESInfo.setObjectName(_fromUtf8("StaticRESInfo"))
self.FirstBrowserDBFoundLayout.addWidget(self.StaticRESInfo)
self.FirstBrowserRESLabel = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.FirstBrowserRESLabel.setObjectName(_fromUtf8("FirstBrowserRESLabel"))
self.FirstBrowserDBFoundLayout.addWidget(self.FirstBrowserRESLabel)
self.FirstBrowserDBFoundLayout.setStretch(0, 40)
self.FirstBrowserDBFoundLayout.setStretch(1, 60)
self.FirstBrowserVerticalLayout.addLayout(self.FirstBrowserDBFoundLayout)
self.FirstBrowserVerticalLayout.setStretch(0, 10)
self.FirstBrowserVerticalLayout.setStretch(1, 20)
self.FirstBrowserVerticalLayout.setStretch(2, 20)
self.FirstBrowserVerticalLayout.setStretch(3, 20)
self.FirstBrowserLayout.addLayout(self.FirstBrowserVerticalLayout)
self.BrowseerPickerLayout.addLayout(self.FirstBrowserLayout)
self.StaticBrowserDividerLine = QtGui.QFrame(self.BackupRestoreMigrateTab)
self.StaticBrowserDividerLine.setFrameShape(QtGui.QFrame.VLine)
self.StaticBrowserDividerLine.setFrameShadow(QtGui.QFrame.Sunken)
self.StaticBrowserDividerLine.setObjectName(_fromUtf8("StaticBrowserDividerLine"))
self.BrowseerPickerLayout.addWidget(self.StaticBrowserDividerLine)
self.SecondBrowserLayout = QtGui.QHBoxLayout()
self.SecondBrowserLayout.setSpacing(0)
self.SecondBrowserLayout.setObjectName(_fromUtf8("SecondBrowserLayout"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.StaticSecondBrowserLabel = QtGui.QLabel(self.BackupRestoreMigrateTab)
font = QtGui.QFont()
font.setItalic(True)
self.StaticSecondBrowserLabel.setFont(font)
self.StaticSecondBrowserLabel.setAlignment(QtCore.Qt.AlignCenter)
self.StaticSecondBrowserLabel.setObjectName(_fromUtf8("StaticSecondBrowserLabel"))
self.verticalLayout.addWidget(self.StaticSecondBrowserLabel)
self.SecondBrowserNameLayout = QtGui.QHBoxLayout()
self.SecondBrowserNameLayout.setObjectName(_fromUtf8("SecondBrowserNameLayout"))
self.StaticBrowserName_2 = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticBrowserName_2.setObjectName(_fromUtf8("StaticBrowserName_2"))
self.SecondBrowserNameLayout.addWidget(self.StaticBrowserName_2)
self.cboSecondBrowser = QtGui.QComboBox(self.BackupRestoreMigrateTab)
self.cboSecondBrowser.setObjectName(_fromUtf8("cboSecondBrowser"))
self.SecondBrowserNameLayout.addWidget(self.cboSecondBrowser)
self.SecondBrowserNameLayout.setStretch(0, 40)
self.SecondBrowserNameLayout.setStretch(1, 60)
self.verticalLayout.addLayout(self.SecondBrowserNameLayout)
self.SecondBrowserProfileLayout = QtGui.QHBoxLayout()
self.SecondBrowserProfileLayout.setObjectName(_fromUtf8("SecondBrowserProfileLayout"))
self.StaticProfileName_2 = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticProfileName_2.setObjectName(_fromUtf8("StaticProfileName_2"))
self.SecondBrowserProfileLayout.addWidget(self.StaticProfileName_2)
self.cboSecondBrowserProfile = QtGui.QComboBox(self.BackupRestoreMigrateTab)
self.cboSecondBrowserProfile.setObjectName(_fromUtf8("cboSecondBrowserProfile"))
self.SecondBrowserProfileLayout.addWidget(self.cboSecondBrowserProfile)
self.SecondBrowserProfileLayout.setStretch(0, 40)
self.SecondBrowserProfileLayout.setStretch(1, 60)
self.verticalLayout.addLayout(self.SecondBrowserProfileLayout)
self.SecondBrowserDBFoundLayout = QtGui.QHBoxLayout()
self.SecondBrowserDBFoundLayout.setSpacing(6)
self.SecondBrowserDBFoundLayout.setObjectName(_fromUtf8("SecondBrowserDBFoundLayout"))
self.StaticRESInfo_2 = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticRESInfo_2.setObjectName(_fromUtf8("StaticRESInfo_2"))
self.SecondBrowserDBFoundLayout.addWidget(self.StaticRESInfo_2)
self.SecondBrowserRESLabel = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.SecondBrowserRESLabel.setObjectName(_fromUtf8("SecondBrowserRESLabel"))
self.SecondBrowserDBFoundLayout.addWidget(self.SecondBrowserRESLabel)
self.SecondBrowserDBFoundLayout.setStretch(0, 40)
self.SecondBrowserDBFoundLayout.setStretch(1, 60)
self.verticalLayout.addLayout(self.SecondBrowserDBFoundLayout)
self.verticalLayout.setStretch(0, 10)
self.verticalLayout.setStretch(1, 20)
self.verticalLayout.setStretch(2, 20)
self.verticalLayout.setStretch(3, 20)
self.SecondBrowserLayout.addLayout(self.verticalLayout)
self.BrowseerPickerLayout.addLayout(self.SecondBrowserLayout)
self.MainContentLayout.addLayout(self.BrowseerPickerLayout)
self.StaticBrowserFeaturesLine = QtGui.QFrame(self.BackupRestoreMigrateTab)
self.StaticBrowserFeaturesLine.setFrameShape(QtGui.QFrame.HLine)
self.StaticBrowserFeaturesLine.setFrameShadow(QtGui.QFrame.Sunken)
self.StaticBrowserFeaturesLine.setObjectName(_fromUtf8("StaticBrowserFeaturesLine"))
self.MainContentLayout.addWidget(self.StaticBrowserFeaturesLine)
self.labelMessage = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.labelMessage.setEnabled(True)
self.labelMessage.setStyleSheet(_fromUtf8("color: rgb(255, 0, 0);"))
self.labelMessage.setFrameShape(QtGui.QFrame.NoFrame)
self.labelMessage.setAlignment(QtCore.Qt.AlignCenter)
self.labelMessage.setObjectName(_fromUtf8("labelMessage"))
self.MainContentLayout.addWidget(self.labelMessage)
self.ApplicationActionsLayout = QtGui.QHBoxLayout()
self.ApplicationActionsLayout.setObjectName(_fromUtf8("ApplicationActionsLayout"))
self.BackupsLayout = QtGui.QVBoxLayout()
self.BackupsLayout.setSpacing(6)
self.BackupsLayout.setObjectName(_fromUtf8("BackupsLayout"))
self.StaticLabelMigrating = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticLabelMigrating.setObjectName(_fromUtf8("StaticLabelMigrating"))
self.BackupsLayout.addWidget(self.StaticLabelMigrating)
self.btnFirstToSecond = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnFirstToSecond.setEnabled(False)
self.btnFirstToSecond.setObjectName(_fromUtf8("btnFirstToSecond"))
self.BackupsLayout.addWidget(self.btnFirstToSecond)
self.btnSecondToFirst = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnSecondToFirst.setEnabled(False)
self.btnSecondToFirst.setObjectName(_fromUtf8("btnSecondToFirst"))
self.BackupsLayout.addWidget(self.btnSecondToFirst)
self.StaticDivider = QtGui.QFrame(self.BackupRestoreMigrateTab)
self.StaticDivider.setFrameShape(QtGui.QFrame.HLine)
self.StaticDivider.setFrameShadow(QtGui.QFrame.Sunken)
self.StaticDivider.setObjectName(_fromUtf8("StaticDivider"))
self.BackupsLayout.addWidget(self.StaticDivider)
self.StaticLabelBackups_2 = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticLabelBackups_2.setObjectName(_fromUtf8("StaticLabelBackups_2"))
self.BackupsLayout.addWidget(self.StaticLabelBackups_2)
self.btnBackupFirst = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnBackupFirst.setEnabled(False)
self.btnBackupFirst.setObjectName(_fromUtf8("btnBackupFirst"))
self.BackupsLayout.addWidget(self.btnBackupFirst)
self.btnBackupSecond = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnBackupSecond.setEnabled(False)
self.btnBackupSecond.setObjectName(_fromUtf8("btnBackupSecond"))
self.BackupsLayout.addWidget(self.btnBackupSecond)
self.btnRestoreToFirst = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnRestoreToFirst.setEnabled(False)
self.btnRestoreToFirst.setObjectName(_fromUtf8("btnRestoreToFirst"))
self.BackupsLayout.addWidget(self.btnRestoreToFirst)
self.btnRestoreToSecond = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnRestoreToSecond.setEnabled(False)
self.btnRestoreToSecond.setObjectName(_fromUtf8("btnRestoreToSecond"))
self.BackupsLayout.addWidget(self.btnRestoreToSecond)
self.VersionLayout = QtGui.QHBoxLayout()
self.VersionLayout.setSpacing(0)
self.VersionLayout.setContentsMargins(-1, -1, -1, 0)
self.VersionLayout.setObjectName(_fromUtf8("VersionLayout"))
self.lblVersion = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.lblVersion.setWordWrap(True)
self.lblVersion.setObjectName(_fromUtf8("lblVersion"))
self.VersionLayout.addWidget(self.lblVersion)
self.StaticLabelVersion = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticLabelVersion.setObjectName(_fromUtf8("StaticLabelVersion"))
self.VersionLayout.addWidget(self.StaticLabelVersion)
self.BackupsLayout.addLayout(self.VersionLayout)
self.lblUpdateAvailable = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.lblUpdateAvailable.setEnabled(True)
self.lblUpdateAvailable.setAlignment(QtCore.Qt.AlignCenter)
self.lblUpdateAvailable.setObjectName(_fromUtf8("lblUpdateAvailable"))
self.BackupsLayout.addWidget(self.lblUpdateAvailable)
self.ApplicationActionsLayout.addLayout(self.BackupsLayout)
self.MainFeaturesLayout = QtGui.QVBoxLayout()
self.MainFeaturesLayout.setObjectName(_fromUtf8("MainFeaturesLayout"))
self.StaticLabelBackups = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticLabelBackups.setObjectName(_fromUtf8("StaticLabelBackups"))
self.MainFeaturesLayout.addWidget(self.StaticLabelBackups)
self.listBackups = QtGui.QListWidget(self.BackupRestoreMigrateTab)
self.listBackups.setObjectName(_fromUtf8("listBackups"))
self.MainFeaturesLayout.addWidget(self.listBackups)
self.btnDeleteBackup = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnDeleteBackup.setObjectName(_fromUtf8("btnDeleteBackup"))
self.MainFeaturesLayout.addWidget(self.btnDeleteBackup)
self.ApplicationActionsLayout.addLayout(self.MainFeaturesLayout)
self.ApplicationActionsLayout.setStretch(0, 60)
self.ApplicationActionsLayout.setStretch(1, 40)
self.MainContentLayout.addLayout(self.ApplicationActionsLayout)
self.MainContentLayout.setStretch(0, 30)
self.MainContentLayout.setStretch(3, 70)
self.verticalLayout_2.addLayout(self.MainContentLayout)
self.MainTabWidget.addTab(self.BackupRestoreMigrateTab, _fromUtf8(""))
self.SettingsTab = QtGui.QWidget()
self.SettingsTab.setObjectName(_fromUtf8("SettingsTab"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.SettingsTab)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.BackupFolderLayout = QtGui.QVBoxLayout()
self.BackupFolderLayout.setObjectName(_fromUtf8("BackupFolderLayout"))
self.BackupFolderLayout_2 = QtGui.QVBoxLayout()
self.BackupFolderLayout_2.setObjectName(_fromUtf8("BackupFolderLayout_2"))
self.StaticBackupFolderDescLabel = QtGui.QLabel(self.SettingsTab)
self.StaticBackupFolderDescLabel.setObjectName(_fromUtf8("StaticBackupFolderDescLabel"))
self.BackupFolderLayout_2.addWidget(self.StaticBackupFolderDescLabel)
self.BackupFolderLayout_3 = QtGui.QHBoxLayout()
self.BackupFolderLayout_3.setObjectName(_fromUtf8("BackupFolderLayout_3"))
self.StaticBackupFolderLabel = QtGui.QLabel(self.SettingsTab)
self.StaticBackupFolderLabel.setObjectName(_fromUtf8("StaticBackupFolderLabel"))
self.BackupFolderLayout_3.addWidget(self.StaticBackupFolderLabel)
self.lneBackupFolder = QtGui.QLineEdit(self.SettingsTab)
self.lneBackupFolder.setObjectName(_fromUtf8("lneBackupFolder"))
self.BackupFolderLayout_3.addWidget(self.lneBackupFolder)
self.btnBrowseBackupsFolder = QtGui.QPushButton(self.SettingsTab)
self.btnBrowseBackupsFolder.setObjectName(_fromUtf8("btnBrowseBackupsFolder"))
self.BackupFolderLayout_3.addWidget(self.btnBrowseBackupsFolder)
self.BackupFolderLayout_2.addLayout(self.BackupFolderLayout_3)
self.chkAutomaticBakFolder = QtGui.QCheckBox(self.SettingsTab)
self.chkAutomaticBakFolder.setObjectName(_fromUtf8("chkAutomaticBakFolder"))
self.BackupFolderLayout_2.addWidget(self.chkAutomaticBakFolder)
self.BackupFolderLayout.addLayout(self.BackupFolderLayout_2)
self.verticalLayout_4.addLayout(self.BackupFolderLayout)
self.SettingsDividerLine = QtGui.QFrame(self.SettingsTab)
self.SettingsDividerLine.setFrameShape(QtGui.QFrame.HLine)
self.SettingsDividerLine.setFrameShadow(QtGui.QFrame.Sunken)
self.SettingsDividerLine.setObjectName(_fromUtf8("SettingsDividerLine"))
self.verticalLayout_4.addWidget(self.SettingsDividerLine)
self.BackupDateFormatLayout = QtGui.QVBoxLayout()
self.BackupDateFormatLayout.setObjectName(_fromUtf8("BackupDateFormatLayout"))
self.StaticBackupDateFormatDescLabel = QtGui.QLabel(self.SettingsTab)
self.StaticBackupDateFormatDescLabel.setObjectName(_fromUtf8("StaticBackupDateFormatDescLabel"))
self.BackupDateFormatLayout.addWidget(self.StaticBackupDateFormatDescLabel)
self.BackupDateFormatLayout_2 = QtGui.QHBoxLayout()
self.BackupDateFormatLayout_2.setObjectName(_fromUtf8("BackupDateFormatLayout_2"))
self.StaticBackupDateFormatLabel = QtGui.QLabel(self.SettingsTab)
self.StaticBackupDateFormatLabel.setObjectName(_fromUtf8("StaticBackupDateFormatLabel"))
self.BackupDateFormatLayout_2.addWidget(self.StaticBackupDateFormatLabel)
self.lneBackupTimeFormat = QtGui.QLineEdit(self.SettingsTab)
self.lneBackupTimeFormat.setObjectName(_fromUtf8("lneBackupTimeFormat"))
self.BackupDateFormatLayout_2.addWidget(self.lneBackupTimeFormat)
self.BackupDateFormatLayout.addLayout(self.BackupDateFormatLayout_2)
self.verticalLayout_4.addLayout(self.BackupDateFormatLayout)
self.SettingsDividerLine_2 = QtGui.QFrame(self.SettingsTab)
self.SettingsDividerLine_2.setFrameShape(QtGui.QFrame.HLine)
self.SettingsDividerLine_2.setFrameShadow(QtGui.QFrame.Sunken)
self.SettingsDividerLine_2.setObjectName(_fromUtf8("SettingsDividerLine_2"))
self.verticalLayout_4.addWidget(self.SettingsDividerLine_2)
self.PortableSettingsFormatLayout = QtGui.QVBoxLayout()
self.PortableSettingsFormatLayout.setObjectName(_fromUtf8("PortableSettingsFormatLayout"))
self.StaticPortableSettingsDescLabel = QtGui.QLabel(self.SettingsTab)
self.StaticPortableSettingsDescLabel.setWordWrap(True)
self.StaticPortableSettingsDescLabel.setObjectName(_fromUtf8("StaticPortableSettingsDescLabel"))
self.PortableSettingsFormatLayout.addWidget(self.StaticPortableSettingsDescLabel)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.chkPortableSettings = QtGui.QCheckBox(self.SettingsTab)
self.chkPortableSettings.setChecked(True)
self.chkPortableSettings.setObjectName(_fromUtf8("chkPortableSettings"))
self.horizontalLayout_2.addWidget(self.chkPortableSettings)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.btnRemoveSystemConfig = QtGui.QPushButton(self.SettingsTab)
self.btnRemoveSystemConfig.setObjectName(_fromUtf8("btnRemoveSystemConfig"))
self.horizontalLayout_2.addWidget(self.btnRemoveSystemConfig)
self.btnRemoveLocalConfig = QtGui.QPushButton(self.SettingsTab)
self.btnRemoveLocalConfig.setObjectName(_fromUtf8("btnRemoveLocalConfig"))
self.horizontalLayout_2.addWidget(self.btnRemoveLocalConfig)
self.PortableSettingsFormatLayout.addLayout(self.horizontalLayout_2)
self.verticalLayout_4.addLayout(self.PortableSettingsFormatLayout)
self.SettingsDividerLine_3 = QtGui.QFrame(self.SettingsTab)
self.SettingsDividerLine_3.setFrameShape(QtGui.QFrame.HLine)
self.SettingsDividerLine_3.setFrameShadow(QtGui.QFrame.Sunken)
self.SettingsDividerLine_3.setObjectName(_fromUtf8("SettingsDividerLine_3"))
self.verticalLayout_4.addWidget(self.SettingsDividerLine_3)
self.DebuggingSettingsLayout = QtGui.QVBoxLayout()
self.DebuggingSettingsLayout.setObjectName(_fromUtf8("DebuggingSettingsLayout"))
self.StaticDebuggingSettingsLabel = QtGui.QLabel(self.SettingsTab)
self.StaticDebuggingSettingsLabel.setObjectName(_fromUtf8("StaticDebuggingSettingsLabel"))
self.DebuggingSettingsLayout.addWidget(self.StaticDebuggingSettingsLabel)
self.DebuggingStatusLabel = QtGui.QLabel(self.SettingsTab)
self.DebuggingStatusLabel.setObjectName(_fromUtf8("DebuggingStatusLabel"))
self.DebuggingSettingsLayout.addWidget(self.DebuggingStatusLabel)
self.DebuggingSettingsLayout_2 = QtGui.QHBoxLayout()
self.DebuggingSettingsLayout_2.setObjectName(_fromUtf8("DebuggingSettingsLayout_2"))
self.btnEnableLogging = QtGui.QPushButton(self.SettingsTab)
self.btnEnableLogging.setObjectName(_fromUtf8("btnEnableLogging"))
self.DebuggingSettingsLayout_2.addWidget(self.btnEnableLogging)
self.btnDisableLogging = QtGui.QPushButton(self.SettingsTab)
self.btnDisableLogging.setObjectName(_fromUtf8("btnDisableLogging"))
self.DebuggingSettingsLayout_2.addWidget(self.btnDisableLogging)
self.btnSubmitBug = QtGui.QPushButton(self.SettingsTab)
self.btnSubmitBug.setObjectName(_fromUtf8("btnSubmitBug"))
self.DebuggingSettingsLayout_2.addWidget(self.btnSubmitBug)
self.DebuggingSettingsLayout.addLayout(self.DebuggingSettingsLayout_2)
self.verticalLayout_4.addLayout(self.DebuggingSettingsLayout)
self.SettingsDividerLine_4 = QtGui.QFrame(self.SettingsTab)
self.SettingsDividerLine_4.setFrameShape(QtGui.QFrame.HLine)
self.SettingsDividerLine_4.setFrameShadow(QtGui.QFrame.Sunken)
self.SettingsDividerLine_4.setObjectName(_fromUtf8("SettingsDividerLine_4"))
self.verticalLayout_4.addWidget(self.SettingsDividerLine_4)
self.AutomaticUpdateLayer = QtGui.QVBoxLayout()
self.AutomaticUpdateLayer.setObjectName(_fromUtf8("AutomaticUpdateLayer"))
self.StaticAutomaticUpdatesDescLabel = QtGui.QLabel(self.SettingsTab)
self.StaticAutomaticUpdatesDescLabel.setWordWrap(True)
self.StaticAutomaticUpdatesDescLabel.setObjectName(_fromUtf8("StaticAutomaticUpdatesDescLabel"))
self.AutomaticUpdateLayer.addWidget(self.StaticAutomaticUpdatesDescLabel)
self.chkAutomaticUpdates = QtGui.QCheckBox(self.SettingsTab)
self.chkAutomaticUpdates.setChecked(False)
self.chkAutomaticUpdates.setObjectName(_fromUtf8("chkAutomaticUpdates"))
self.AutomaticUpdateLayer.addWidget(self.chkAutomaticUpdates)
self.verticalLayout_4.addLayout(self.AutomaticUpdateLayer)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem1)
self.SettingsButtonsLayout = QtGui.QHBoxLayout()
self.SettingsButtonsLayout.setObjectName(_fromUtf8("SettingsButtonsLayout"))
self.btnRestoreSettings = QtGui.QPushButton(self.SettingsTab)
self.btnRestoreSettings.setObjectName(_fromUtf8("btnRestoreSettings"))
self.SettingsButtonsLayout.addWidget(self.btnRestoreSettings)
self.btnSaveSettings = QtGui.QPushButton(self.SettingsTab)
self.btnSaveSettings.setObjectName(_fromUtf8("btnSaveSettings"))
self.SettingsButtonsLayout.addWidget(self.btnSaveSettings)
self.verticalLayout_4.addLayout(self.SettingsButtonsLayout)
self.MainTabWidget.addTab(self.SettingsTab, _fromUtf8(""))
self.horizontalLayout.addWidget(self.MainTabWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.MainTabWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.chkAutomaticBakFolder, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.lneBackupFolder.setDisabled)
QtCore.QObject.connect(self.chkAutomaticBakFolder, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.btnBrowseBackupsFolder.setDisabled)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "RESTool", None))
self.StaticFirstBrowserLabel.setText(_translate("MainWindow", "First Browser", None))
self.StaticBrowserName.setText(_translate("MainWindow", "Browser Name:", None))
self.StaticProfileName.setText(_translate("MainWindow", "Profile Name:", None))
self.StaticRESInfo.setText(_translate("MainWindow", "RES DB found:", None))
self.FirstBrowserRESLabel.setText(_translate("MainWindow", "N/A", None))
self.StaticSecondBrowserLabel.setText(_translate("MainWindow", "Second Browser", None))
self.StaticBrowserName_2.setText(_translate("MainWindow", "Browser Name:", None))
self.StaticProfileName_2.setText(_translate("MainWindow", "Profile Name:", None))
self.StaticRESInfo_2.setText(_translate("MainWindow", "RES DB found:", None))
self.SecondBrowserRESLabel.setText(_translate("MainWindow", "N/A", None))
self.labelMessage.setText(_translate("MainWindow", "<html><head/><body><p>Warning message</p></body></html>", None))
self.StaticLabelMigrating.setText(_translate("MainWindow", "Migrating existing data", None))
self.btnFirstToSecond.setText(_translate("MainWindow", "First browser to the second browser", None))
self.btnSecondToFirst.setText(_translate("MainWindow", "Second browser to the first browser", None))
self.StaticLabelBackups_2.setText(_translate("MainWindow", "RES database backups", None))
self.btnBackupFirst.setText(_translate("MainWindow", "Backup first browser", None))
self.btnBackupSecond.setText(_translate("MainWindow", "Backup second browser", None))
self.btnRestoreToFirst.setText(_translate("MainWindow", "Restore selected backup to the first browser", None))
self.btnRestoreToSecond.setText(_translate("MainWindow", "Restore selected backup to the second browser", None))
self.lblVersion.setText(_translate("MainWindow", "<html><head/><body><p>Version 0.2.1</p></body></html>", None))
self.StaticLabelVersion.setText(_translate("MainWindow", "<html><head/><body><p>Website: <a href=\"http://nikola-k.github.io/RESTool/\"><span style=\" text-decoration: underline; color:#0000ff;\">nikola-k.github.io/RESTool</span></a></p></body></html>", None))
self.lblUpdateAvailable.setText(_translate("MainWindow", "<html><head/><body><p>Update available. Visit website for more info.</p></body></html>", None))
self.StaticLabelBackups.setText(_translate("MainWindow", "Available Backups", None))
self.btnDeleteBackup.setText(_translate("MainWindow", "Delete Selected Backup", None))
self.MainTabWidget.setTabText(self.MainTabWidget.indexOf(self.BackupRestoreMigrateTab), _translate("MainWindow", "RESTool", None))
self.StaticBackupFolderDescLabel.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-style:italic; color:#585858;\">Folder where the backups will be saved, default: res_backups</span></p></body></html>", None))
self.StaticBackupFolderLabel.setText(_translate("MainWindow", "Backup Folder", None))
self.lneBackupFolder.setText(_translate("MainWindow", "res_backups", None))
self.btnBrowseBackupsFolder.setText(_translate("MainWindow", "Browse", None))
self.chkAutomaticBakFolder.setText(_translate("MainWindow", "Use automatic system specific app directory", None))
self.StaticBackupDateFormatDescLabel.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-style:italic; color:#535353;\">Custom backup date/time format, default: %Y-%m-%d - </span><a href=\"https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior\"><span style=\" font-style:italic; text-decoration: underline; color:#0055ff;\">available variables</span></a></p></body></html>", None))
self.StaticBackupDateFormatLabel.setText(_translate("MainWindow", "Backup time format:", None))
self.lneBackupTimeFormat.setText(_translate("MainWindow", "%Y-%m-%d", None))
self.StaticPortableSettingsDescLabel.setText(_translate("MainWindow", "<html><head/><body><p>Portable RES settings storage (settings.json with your custom config will be stored next to app if checked, otherwise it will be placed in auto-detected system specific configuration directory)</p></body></html>", None))
self.chkPortableSettings.setText(_translate("MainWindow", "Portable settings", None))
self.btnRemoveSystemConfig.setText(_translate("MainWindow", "Remove System Settings File", None))
self.btnRemoveLocalConfig.setText(_translate("MainWindow", "Remove Local Settings File", None))
self.StaticDebuggingSettingsLabel.setText(_translate("MainWindow", "Debugging/Log reporting functions:", None))
self.DebuggingStatusLabel.setText(_translate("MainWindow", "Current logging status: Disabled", None))
self.btnEnableLogging.setText(_translate("MainWindow", "Enable Logging", None))
self.btnDisableLogging.setText(_translate("MainWindow", "Disable Logging", None))
self.btnSubmitBug.setText(_translate("MainWindow", "Submit a bug report", None))
self.StaticAutomaticUpdatesDescLabel.setText(_translate("MainWindow", "<html><head/><body><p>Automatic checking for new version. No personal information is sent to the server.</p></body></html>", None))
self.chkAutomaticUpdates.setText(_translate("MainWindow", "Check for updates on startup.", None))
self.btnRestoreSettings.setText(_translate("MainWindow", "Restore settings to default", None))
self.btnSaveSettings.setText(_translate("MainWindow", "Save current settings", None))
self.MainTabWidget.setTabText(self.MainTabWidget.indexOf(self.SettingsTab), _translate("MainWindow", "Settings", None))
|
Nikola-K/RESTool
|
RESTool/restoolgui.py
|
Python
|
apache-2.0
| 31,410
|
[
"VisIt"
] |
1ca841335d595c46cc82c809af431e0066d4ad9097ad3e910176b150f4578a12
|
import unittest
import numpy as np
import pysal.lib
from pysal.model.spreg.twosls import BaseTSLS, TSLS
from pysal.lib.common import RTOL
class TestBaseTSLS(unittest.TestCase):
def setUp(self):
db = pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),'r')
self.y = np.array(db.by_col("CRIME"))
self.y = np.reshape(self.y, (49,1))
self.X = []
self.X.append(db.by_col("INC"))
self.X = np.array(self.X).T
self.X = np.hstack((np.ones(self.y.shape),self.X))
self.yd = []
self.yd.append(db.by_col("HOVAL"))
self.yd = np.array(self.yd).T
self.q = []
self.q.append(db.by_col("DISCBD"))
self.q = np.array(self.q).T
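# 2SLS setup: y (CRIME) regressed on exogenous X (constant + INC) and
# endogenous yd (HOVAL), with q (DISCBD) as the instrument.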
def test_basic(self):
reg = BaseTSLS(self.y, self.X, self.yd, self.q)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
h_0 = np.array([ 1. , 19.531, 5.03 ])
np.testing.assert_allclose(reg.h[0], h_0)
hth = np.array([[ 49. , 704.371999 , 139.75 ],
[ 704.371999 , 11686.67338121, 2246.12800625],
[ 139.75 , 2246.12800625, 498.5851 ]])
np.testing.assert_allclose(reg.hth, hth,RTOL)
hthi = np.array([[ 0.1597275 , -0.00762011, -0.01044191],
[-0.00762011, 0.00100135, -0.0023752 ],
[-0.01044191, -0.0023752 , 0.01563276]])
np.testing.assert_allclose(reg.hthi, hthi,RTOL)
self.assertEqual(reg.k, 3)
self.assertEqual(reg.kstar, 1)
np.testing.assert_allclose(reg.mean_y, 35.128823897959187,RTOL)
self.assertEqual(reg.n, 49)
pfora1a2 = np.array([[ 9.58156106, -0.22744226, -0.13820537],
[ 0.02580142, 0.08226331, -0.03143731],
[-3.13896453, -0.33487872, 0.20690965]])
np.testing.assert_allclose(reg.pfora1a2, pfora1a2,RTOL)
predy_5 = np.array([[-28.68949467], [ 28.99484984], [ 55.07344824], [ 38.26609504], [ 57.57145851]])
np.testing.assert_allclose(reg.predy[0:5], predy_5,RTOL)
q_5 = np.array([[ 5.03], [ 4.27], [ 3.89], [ 3.7 ], [ 2.83]])
np.testing.assert_array_equal(reg.q[0:5], q_5)
np.testing.assert_allclose(reg.sig2n_k, 587.56797852699822,RTOL)
np.testing.assert_allclose(reg.sig2n, 551.5944288212637,RTOL)
np.testing.assert_allclose(reg.sig2, 551.5944288212637,RTOL)
np.testing.assert_allclose(reg.std_y, 16.732092091229699,RTOL)
u_5 = np.array([[ 44.41547467], [-10.19309584], [-24.44666724], [ -5.87833504], [ -6.83994851]])
np.testing.assert_allclose(reg.u[0:5], u_5,RTOL)
np.testing.assert_allclose(reg.utu, 27028.127012241919,RTOL)
varb = np.array([[ 0.41526237, 0.01879906, -0.01730372],
[ 0.01879906, 0.00362823, -0.00184604],
[-0.01730372, -0.00184604, 0.0011406 ]])
np.testing.assert_allclose(reg.varb, varb,RTOL)
vm = np.array([[ 229.05640809, 10.36945783, -9.54463414],
[ 10.36945783, 2.0013142 , -1.01826408],
[ -9.54463414, -1.01826408, 0.62914915]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
x_0 = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0], x_0,RTOL)
y_5 = np.array([[ 15.72598 ], [ 18.801754], [ 30.626781], [ 32.38776 ], [ 50.73151 ]])
np.testing.assert_allclose(reg.y[0:5], y_5,RTOL)
yend_5 = np.array([[ 80.467003], [ 44.567001], [ 26.35 ], [ 33.200001], [ 23.225 ]])
np.testing.assert_allclose(reg.yend[0:5], yend_5,RTOL)
z_0 = np.array([ 1. , 19.531 , 80.467003])
np.testing.assert_allclose(reg.z[0], z_0,RTOL)
zthhthi = np.array([[ 1.00000000e+00, -1.66533454e-16, 4.44089210e-16],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[ 1.26978671e+01, 1.05598709e+00, 3.70212359e+00]])
#np.testing.assert_allclose(reg.zthhthi, zthhthi, RTOL)
np.testing.assert_array_almost_equal(reg.zthhthi, zthhthi, 7)
def test_n_k(self):
reg = BaseTSLS(self.y, self.X, self.yd, self.q, sig2n_k=True)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 243.99486949, 11.04572682, -10.16711028],
[ 11.04572682, 2.13183469, -1.08467261],
[ -10.16711028, -1.08467261, 0.67018062]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
def test_white(self):
reg = BaseTSLS(self.y, self.X, self.yd, self.q, robust='white')
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 208.27139316, 15.6687805 , -11.53686154],
[ 15.6687805 , 2.26882747, -1.30312033],
[ -11.53686154, -1.30312033, 0.81940656]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
def test_hac(self):
gwk = pysal.lib.weights.Kernel.from_shapefile(pysal.lib.examples.get_path('columbus.shp'),k=15,function='triangular', fixed=False)
reg = BaseTSLS(self.y, self.X, self.yd, self.q, robust='hac', gwk=gwk)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 231.07254978, 15.42050291, -11.3941033 ],
[ 15.01376346, 1.92422887, -1.11865505],
[ -11.34381641, -1.1279227 , 0.72053806]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
class TestTSLS(unittest.TestCase):
def setUp(self):
db = pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),'r')
self.y = np.array(db.by_col("CRIME"))
self.y = np.reshape(self.y, (49,1))
self.X = []
self.X.append(db.by_col("INC"))
self.X = np.array(self.X).T
self.yd = []
self.yd.append(db.by_col("HOVAL"))
self.yd = np.array(self.yd).T
self.q = []
self.q.append(db.by_col("DISCBD"))
self.q = np.array(self.q).T
def test_basic(self):
reg = TSLS(self.y, self.X, self.yd, self.q)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
h_0 = np.array([ 1. , 19.531, 5.03 ])
np.testing.assert_allclose(reg.h[0], h_0)
hth = np.array([[ 49. , 704.371999 , 139.75 ],
[ 704.371999 , 11686.67338121, 2246.12800625],
[ 139.75 , 2246.12800625, 498.5851 ]])
np.testing.assert_allclose(reg.hth, hth,RTOL)
hthi = np.array([[ 0.1597275 , -0.00762011, -0.01044191],
[-0.00762011, 0.00100135, -0.0023752 ],
[-0.01044191, -0.0023752 , 0.01563276]])
np.testing.assert_allclose(reg.hthi, hthi,RTOL)
self.assertEqual(reg.k, 3)
self.assertEqual(reg.kstar, 1)
np.testing.assert_allclose(reg.mean_y, 35.128823897959187,RTOL)
self.assertEqual(reg.n, 49)
pfora1a2 = np.array([[ 9.58156106, -0.22744226, -0.13820537],
[ 0.02580142, 0.08226331, -0.03143731],
[-3.13896453, -0.33487872, 0.20690965]])
np.testing.assert_allclose(reg.pfora1a2, pfora1a2,RTOL)
predy_5 = np.array([[-28.68949467], [ 28.99484984], [ 55.07344824], [ 38.26609504], [ 57.57145851]])
np.testing.assert_allclose(reg.predy[0:5], predy_5,RTOL)
q_5 = np.array([[ 5.03], [ 4.27], [ 3.89], [ 3.7 ], [ 2.83]])
np.testing.assert_array_equal(reg.q[0:5], q_5)
np.testing.assert_allclose(reg.sig2n_k, 587.56797852699822,RTOL)
np.testing.assert_allclose(reg.sig2n, 551.5944288212637,RTOL)
np.testing.assert_allclose(reg.sig2, 551.5944288212637,RTOL)
np.testing.assert_allclose(reg.std_y, 16.732092091229699,RTOL)
u_5 = np.array([[ 44.41547467], [-10.19309584], [-24.44666724], [ -5.87833504], [ -6.83994851]])
np.testing.assert_allclose(reg.u[0:5], u_5,RTOL)
np.testing.assert_allclose(reg.utu, 27028.127012241919,RTOL)
varb = np.array([[ 0.41526237, 0.01879906, -0.01730372],
[ 0.01879906, 0.00362823, -0.00184604],
[-0.01730372, -0.00184604, 0.0011406 ]])
np.testing.assert_allclose(reg.varb, varb,RTOL)
vm = np.array([[ 229.05640809, 10.36945783, -9.54463414],
[ 10.36945783, 2.0013142 , -1.01826408],
[ -9.54463414, -1.01826408, 0.62914915]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
x_0 = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0], x_0,RTOL)
y_5 = np.array([[ 15.72598 ], [ 18.801754], [ 30.626781], [ 32.38776 ], [ 50.73151 ]])
np.testing.assert_allclose(reg.y[0:5], y_5,RTOL)
yend_5 = np.array([[ 80.467003], [ 44.567001], [ 26.35 ], [ 33.200001], [ 23.225 ]])
np.testing.assert_allclose(reg.yend[0:5], yend_5,RTOL)
z_0 = np.array([ 1. , 19.531 , 80.467003])
np.testing.assert_allclose(reg.z[0], z_0,RTOL)
zthhthi = np.array([[ 1.00000000e+00, -1.66533454e-16, 4.44089210e-16],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[ 1.26978671e+01, 1.05598709e+00, 3.70212359e+00]])
#np.testing.assert_allclose(reg.zthhthi, zthhthi,RTOL)
np.testing.assert_array_almost_equal(reg.zthhthi, zthhthi, 7)
np.testing.assert_allclose(reg.pr2, 0.27936137128173893,RTOL)
z_stat = np.array([[ 5.84526447e+00, 5.05764078e-09],
[ 3.67601567e-01, 7.13170346e-01],
[ -1.99468913e+00, 4.60767956e-02]])
np.testing.assert_allclose(reg.z_stat, z_stat,RTOL)
title = 'TWO STAGE LEAST SQUARES'
self.assertEqual(reg.title, title)
def test_n_k(self):
reg = TSLS(self.y, self.X, self.yd, self.q, sig2n_k=True)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 243.99486949, 11.04572682, -10.16711028],
[ 11.04572682, 2.13183469, -1.08467261],
[ -10.16711028, -1.08467261, 0.67018062]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
def test_white(self):
reg = TSLS(self.y, self.X, self.yd, self.q, robust='white')
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 208.27139316, 15.6687805 , -11.53686154],
[ 15.6687805 , 2.26882747, -1.30312033],
[ -11.53686154, -1.30312033, 0.81940656]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
self.assertEqual(reg.robust, 'white')
def test_hac(self):
gwk = pysal.lib.weights.Kernel.from_shapefile(pysal.lib.examples.get_path('columbus.shp'),k=5,function='triangular', fixed=False)
reg = TSLS(self.y, self.X, self.yd, self.q, robust='hac', gwk=gwk)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 225.0795089 , 17.11660041, -12.22448566],
[ 17.67097154, 2.47483461, -1.4183641 ],
[ -12.45093722, -1.40495464, 0.8700441 ]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
self.assertEqual(reg.robust, 'hac')
def test_spatial(self):
w = pysal.lib.weights.Queen.from_shapefile(pysal.lib.examples.get_path('columbus.shp'))
reg = TSLS(self.y, self.X, self.yd, self.q, spat_diag=True, w=w)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 229.05640809, 10.36945783, -9.54463414],
[ 10.36945783, 2.0013142 , -1.01826408],
[ -9.54463414, -1.01826408, 0.62914915]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
ak_test = np.array([ 1.16816972, 0.27977763])
np.testing.assert_allclose(reg.ak_test, ak_test,RTOL)
def test_names(self):
w = pysal.lib.weights.Queen.from_shapefile(pysal.lib.examples.get_path('columbus.shp'))
gwk = pysal.lib.weights.Kernel.from_shapefile(pysal.lib.examples.get_path('columbus.shp'),k=5,function='triangular', fixed=False)
name_x = ['inc']
name_y = 'crime'
name_yend = ['hoval']
name_q = ['discbd']
name_w = 'queen'
name_gwk = 'k=5'
name_ds = 'columbus'
reg = TSLS(self.y, self.X, self.yd, self.q,
spat_diag=True, w=w, robust='hac', gwk=gwk,
name_x=name_x, name_y=name_y, name_q=name_q, name_w=name_w,
name_yend=name_yend, name_gwk=name_gwk, name_ds=name_ds)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 225.0795089 , 17.11660041, -12.22448566],
[ 17.67097154, 2.47483461, -1.4183641 ],
[ -12.45093722, -1.40495464, 0.8700441 ]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
self.assertListEqual(reg.name_x, ['CONSTANT']+name_x)
self.assertListEqual(reg.name_yend, name_yend)
self.assertListEqual(reg.name_q, name_q)
self.assertEqual(reg.name_y, name_y)
self.assertEqual(reg.name_w, name_w)
self.assertEqual(reg.name_gwk, name_gwk)
self.assertEqual(reg.name_ds, name_ds)
if __name__ == '__main__':
unittest.main()
|
lixun910/pysal
|
pysal/model/spreg/tests/test_twosls.py
|
Python
|
bsd-3-clause
| 14,366
|
[
"COLUMBUS"
] |
196ba3d7484565af50bf5048b5067fc29000da7523cc36649f18da2d147bc110
|
from ase import Atoms
from ase.io import write
from gpaw import GPAW, Mixer
from ase.data.molecules import molecule
CO = molecule('CO')
CO.set_cell((6., 6., 6.))
CO.center()
calc = GPAW(h=0.2,
nbands=8,
mixer=Mixer(beta=0.1, nmaxold=5, weight=50.0),
txt='CO.txt')
CO.set_calculator(calc)
CO.get_potential_energy()
# Write wave functions to gpw file
calc.write('CO.gpw', mode='all')
# Generate cube-files of the orbitals.
for n in range(calc.get_number_of_bands()):
wf = calc.get_pseudo_wave_function(band=n)
write('CO%d.cube' % n, CO, data=wf)
|
qsnake/gpaw
|
doc/exercises/wavefunctions/CO.py
|
Python
|
gpl-3.0
| 593
|
[
"ASE",
"GPAW"
] |
770c4923f034e49d8d6aaabd0cce8e0a86c26c407a94a3b617b496e4f15998f3
|
from ..utils import *
##
# Minions
class AT_006:
"Dalaran Aspirant"
inspire = Buff(SELF, "AT_006e")
AT_006e = buff(spellpower=1)
class AT_007:
"Spellslinger"
play = Give(ALL_PLAYERS, RandomSpell())
class AT_008:
"Coldarra Drake"
update = Refresh(FRIENDLY_HERO_POWER, {GameTag.HEROPOWER_ADDITIONAL_ACTIVATIONS: SET(-1)})
class AT_009:
"Rhonin"
deathrattle = Give(CONTROLLER, "EX1_277") * 3
##
# Spells
class AT_001:
"Flame Lance"
play = Hit(TARGET, 8)
class AT_004:
"Arcane Blast"
play = Hit(TARGET, 2)
class AT_005:
"Polymorph: Boar"
play = Morph(TARGET, "AT_005t")
##
# Secrets
class AT_002:
"Effigy"
secret = Death(FRIENDLY + MINION).on(FULL_BOARD | (
Reveal(SELF),
Summon(CONTROLLER, RandomMinion(cost=COST(Death.ENTITY)))
))
|
beheh/fireplace
|
fireplace/cards/tgt/mage.py
|
Python
|
agpl-3.0
| 770
|
[
"BLAST"
] |
556063811b029b9ab1620770c8512355016028b4a28dd7f92dfe9777688d9bf5
|
#!/usr/bin/python
"""
Extracting features from VTK input files.
Authors:
- Forrest Sheng Bao (forrest.bao@gmail.com) http://fsbao.net
Copyright 2012, Mindboggle team (http://mindboggle.info), Apache v2.0 License
For algorithmic details, please check:
Forrest S. Bao, et al., Automated extraction of nested sulcus features from human brain MRI data,
IEEE EMBC 2012, San Diego, CA
Dependencies:
python-vtk: vtk's official Python binding
numpy
io_vtk : under mindboggle/utils
"""
from numpy import mean, std, median, array, zeros, eye, flatnonzero, sign, matrix, zeros_like
import os.path
import cPickle
#import io_vtk # Assuming io_vtk is in PYTHONPATH
from mindboggle.utils import io_vtk
import sys
#-----------------Begin function definitions-------------------------------------------------------------
def fcNbrLst(FaceDB, Hemi):
'''Get a neighbor list of faces, also the vertex not shared with current face
Data structure:
NbrLst: a list of size len(FaceDB)
NbrLst[i]: two lists of size 3 each.
NbrLst[i][0] = [F0, F1, F2]: F0 is the neighbor of face i facing V0 where [V0, V1, V2] is face i. And so forth.
NbrLst[i][1] = [V0p, V1p, V2p]: V0p is the vertex of F0 that is not shared with face i
'''
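    # Hedged worked example (not from the original code): for
    # FaceDB = [[0, 1, 2], [1, 2, 3]] the two faces share edge (1, 2), so
    # NbrLst[0] = [[1, -1, -1], [3, -1, -1]] (face 1 faces V0=0, and vertex 3
    # is the vertex of face 1 not shared with face 0), while
    # NbrLst[1] = [[-1, -1, 0], [-1, -1, 0]].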
NbrFile = Hemi + '.fc.nbr'
if os.path.exists(NbrFile):
#return fileio.loadFcNbrLst(NbrFile)
print "loading face nbr lst from:" , NbrFile
Fp = open(NbrFile, 'r')
NbrLst = cPickle.load(Fp)
Fp.close()
return NbrLst
print "calculating face neighbor list"
FaceNo = len(FaceDB)
    NbrLst = [[[-1, -1, -1], [-1, -1, -1]] for i in xrange(FaceNo)]
    Done = [0] * FaceNo
for i in xrange(0, FaceNo):
# for i in xrange(0, 2600+1):
# print i
Face = FaceDB[i]
# [V0, V1, V2] = Face
# Found = 0 # if Found == 1, no need to try other faces
for j in xrange(i+1, FaceNo):
AnotherFace = FaceDB[j]
for Idx in xrange(0,2):
ChkFc1 = Face[Idx]
for ChkFc2 in Face[Idx+1:3]:
if ChkFc1 in AnotherFace:
if ChkFc2 in AnotherFace:
                            NbrID1 = 3 - Face.index(ChkFc1) - Face.index(ChkFc2) # determine whether it's F0, F1 or F2.
NbrLst[i][0][NbrID1] = j
                            NbrID2 = 3 - AnotherFace.index(ChkFc1) - AnotherFace.index(ChkFc2) # determine whether it's F0, F1 or F2.
NbrLst[j][0][NbrID2] = i
# Vp1 = AnotherFace[NbrID2]# determine V{0,1,2}p
# Vp2 = Face[NbrID1]# determine V{0,1,2}p
NbrLst[i][1][NbrID1] = AnotherFace[NbrID2]
NbrLst[j][1][NbrID2] = Face[NbrID1]
Done[i] += 1
Done[j] += 1
if Done[i] ==3:
                break # all three neighbors of Face have been found
Fp = open(NbrFile, 'w')
# Commented 2011-11-27 23:54
# for i in xrange(0, len(FaceDB)
# for j in NbrLst[i]:
# Fp.write(str(j[0]) + '\t' + str(j[1]) + '\t' + str(j[2]) + '\t')
# Fp.write('\n')
# End of Commented 2011-11-27 23:54
cPickle.dump(NbrLst, Fp)
Fp.close()
return NbrLst
def vrtxNbrLst(VrtxNo, FaceDB, Hemi):
"""Given the number of vertexes and the list of faces, find the neighbors of each vertex, in list formate.
"""
NbrFile = Hemi + '.vrtx.nbr'
if os.path.exists(NbrFile):
#return fileio.loadVrtxNbrLst(NbrFile) # change to cPickle
print "Loading vertex nbr lst from:", NbrFile
Fp = open(NbrFile, 'r') # need to use cPickle
NbrLst = cPickle.load(Fp)
Fp.close()
return NbrLst
print "Calculating vertex neighbor list"
NbrLst = [[] for i in xrange(0, VrtxNo)]
for Face in FaceDB:
[V0, V1, V2] = Face
if not V1 in NbrLst[V0]:
NbrLst[V0].append(V1)
if not V2 in NbrLst[V0]:
NbrLst[V0].append(V2)
if not V0 in NbrLst[V1]:
NbrLst[V1].append(V0)
if not V2 in NbrLst[V1]:
NbrLst[V1].append(V2)
        if not V0 in NbrLst[V2]:
            NbrLst[V2].append(V0)  # fixed: was append(V1), which never recorded V0 as a neighbor of V2
        if not V1 in NbrLst[V2]:
            NbrLst[V2].append(V1)
Fp = open(NbrFile, 'w') # need to use cPickle
# Commented 2011-11-27 23:54
# for i in xrange(0, VrtxNo):
# [Fp.write(str(Vrtx) + '\t') for Vrtx in NbrLst[i]]
# Fp.write('\n')
# End of Commented 2011-11-27 23:54
cPickle.dump(NbrLst, Fp)
Fp.close()
return NbrLst
def compnent(FaceDB, Basin, NbrLst, PathHeader):
    '''Get the connected components within all basins, represented as face and vertex clouds
Parameters
-----------
NbrLst : list
neighbor list of faces, NOT VERTEXES
PathHeader : header of the path to save component list
'''
FcCmpntFile = PathHeader + '.cmpnt.face'
VrtxCmpntFile = PathHeader + '.cmpnt.vrtx'
if os.path.exists(FcCmpntFile) and os.path.exists(VrtxCmpntFile):
# return fileio.loadCmpnt(FcCmpntFile), fileio.loadCmpnt(VrtxCmpntFile)
print "Loading Face Components from:", FcCmpntFile
Fp = open(FcCmpntFile, 'r')
FcCmpnt = cPickle.load(Fp)
Fp.close()
print "Loading Vertex Components from:", VrtxCmpntFile
Fp = open(VrtxCmpntFile, 'r')
VrtxCmpnt = cPickle.load(Fp)
Fp.close()
return FcCmpnt, VrtxCmpnt
print "calculating face and vertex components"
Visited = [False for i in xrange(0, len(Basin))]
FcCmpnt, VrtxCmpnt = [], []
while not allTrue(Visited):
Seed = dfsSeed(Visited, Basin)# first basin face that is not True in Visited
# print Seed
        Visited, FcMbr, VrtxMbr = dfs(Seed, Basin, Visited, NbrLst, FaceDB)# DFS to find all connected members from the Seed
FcCmpnt.append(FcMbr)
VrtxCmpnt.append(VrtxMbr)
# fileio.writeCmpnt(FcCmpnt, FcCmpntFile)
# fileio.writeCmpnt(VrtxCmpnt, VrtxCmpntFile)
Fp = open(FcCmpntFile, 'w')
cPickle.dump(FcCmpnt, Fp)
Fp.close()
Fp = open(VrtxCmpntFile, 'w')
cPickle.dump(VrtxCmpnt, Fp)
Fp.close()
return FcCmpnt, VrtxCmpnt
def judgeFace1(FaceID, FaceDB, CurvatureDB, Threshold = 0):
"""Check whether a face satisfies the zero-order criterion
    If all three vertexes of a face have curvature above Threshold, return True. O/w, False.
Input
======
FaceID: integer
the ID of a face, indexing from 0
FaceDB: list
len(FaceDB) == number of faces in the hemisphere
FaceDB[i]: a 1-D list of the IDs of three vertexes that consist of the i-th face
CurvatureDB: list
len(CurvatureDB) == number of vertexes in the hemisphere
CurvatureDB[i]: integer, the curvature of the i-th vertex
"""
[V0, V1, V2] = FaceDB[FaceID]
##
# if (CurvatureDB[V0] > Threshold) and (CurvatureDB[V1] > Threshold) and (CurvatureDB[V2] > Threshold):
# return True
# else:
# return False
##
if (CurvatureDB[V0] <= Threshold) or (CurvatureDB[V1] <= Threshold) or (CurvatureDB[V2] <= Threshold):
return False
else:
return True
def basin(FaceDB, CurvatureDB, Threshold = 0):
'''Given a list of faces and per-vertex curvature value, return a list of faces comprising basins
'''
Basin = []
Left = []
for FaceID in xrange(0, len(FaceDB)):
if judgeFace1(FaceID, FaceDB, CurvatureDB, Threshold = Threshold):
Basin.append(FaceID)
else:
Left.append(FaceID)
return Basin, Left
def allTrue(List):
    '''Return True iff every element of a logical list is True.
'''
# for Bool in List:
# if not Bool:
# return False
# return True
    return all(List)
def dfsSeed(Visited, Basin):
'''Given a list of faces comprising the basins, find a face that has not been visited which will be used as the seeding point for DFS.
'''
for i in xrange(0, len(Visited)):
if not Visited[i]:
return Basin[i]
def dfs(Seed, Basin, Visited, NbrLst, FaceDB):
'''Return all members (faces and vertexes) of the connected component that can be found by DFS from a given seed point
Parameters
-----------
NbrLst : list
neighbor list of faces, NOT VERTEXES
'''
Queue = [Seed]
FcMbr = [] # members that are faces of this connected component
VrtxMbr = [] # members that are vertex of this connected component
while Queue != []:
# print Queue
Seed = Queue.pop()
if Seed in Basin:
if not Visited[Basin.index(Seed)]:
Visited[Basin.index(Seed)] = True
FcMbr.append(Seed)
for Vrtx in FaceDB[Seed]:
if not (Vrtx in VrtxMbr):
VrtxMbr.append(Vrtx)
Queue += NbrLst[Seed][0]
return Visited, FcMbr, VrtxMbr
def pmtx(Adj):
    '''Print a matrix the way MATLAB prints to stdout
'''
for j in xrange(0,25):
print j,
print '\n'
for i in xrange(0, 25):
print i,
for j in xrange(0, 25):
print Adj[i,j],
print '\n'
def all_same(items):
return all(x == items[0] for x in items)
def univariate_pits(CurvDB, VrtxNbrLst, VrtxCmpnt, Thld):
'''Finding pits using one variable, e.g., depth.
'''
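    # Hedged reading of the bookkeeping below (names are the author's):
    # C maps each vertex to a component label, Child records the label a
    # merged component flows into, End marks labels that have been merged
    # away, M is the highest label issued so far, and B collects the
    # candidate pit vertexes.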
print "Extracting pits"
# Stack, P, Child, M, B, End, L = [], [], {}, -1, [], {}, 10
C = [-1 for i in xrange(0, len(VrtxNbrLst))]
Child = {}
End = {}
M = -1
B = []
for Cmpnt in VrtxCmpnt: # for each component
Curv=dict([(i, CurvDB[i]) for i in Cmpnt])
Stack = []
for Vrtx, Cvtr in sorted(Curv.iteritems(), key=lambda (k,v): (v,k)):
Stack.append(Vrtx)
Visited = []
while len(Stack) >0:
Skip_This_Vrtx = False # updated Forrest 2012-02-12, skip vertexes whose neighbors are not in the component to denoise
Vrtx = Stack.pop()
WetNbr = []
NbrCmpnt = []
for Nbr in list(set(VrtxNbrLst[Vrtx])):
if not Nbr in Cmpnt:
Skip_This_Vrtx = True
if Nbr in Visited: # This condition maybe replaced by If C[Vrtx] ==-1
WetNbr.append(Nbr)
if C[Nbr] != -1:
NbrCmpnt.append(C[Nbr])
if Skip_This_Vrtx :
continue
Visited.append(Vrtx)
if len(WetNbr) == 1: # if the vertex has one neighbor that is already wet
[Nbr] = WetNbr
if End[C[Nbr]]:
C[Vrtx] = Child[C[Nbr]]
else:
C[Vrtx] = C[Nbr]
# print C[Nbr], "==>", C[V]
elif len(WetNbr) >1 and all_same(NbrCmpnt): # if the vertex has more than one neighbors which are in the same component
if End[NbrCmpnt[0]]:
C[Vrtx] = Child[NbrCmpnt[0]]
else:
C[Vrtx] = NbrCmpnt[0]
elif len(WetNbr) >1 and not all_same(NbrCmpnt): # if the vertex has more than one neighbors which are NOT in the same component
M += 1
C[Vrtx] = M
for Nbr in WetNbr:
Child[C[Nbr]] = M
End[C[Nbr]] = True
End[M] = False
# elif : # the vertex's neighbor are not fully in the component
else:
M += 1
if CurvDB[Vrtx] > Thld:
B.append(Vrtx)
End[M] = False
C[Vrtx] = M
return B, C, Child
def clouchoux(MCurv, GCurv):
'''Judge whether a vertex is a pit in Clouchoux's definition
Parameters
===========
MCurv: float
mean curvature of a vertex
H in Clouchoux's paper
GCurv: float
        Gaussian curvature of a vertex
K in Clouchoux's paper
Returns
========
True if this is a pit. False, otherwise.
Notes
=========
    (Since Joachim's code updates all the time, these settings have to be updated accordingly)
    In Clouchoux's paper, the following definitions are used:
H > 0, K > 0: pit, in Clouchoux's paper
H < 0, K > 0: peak, in Clouchoux's paper
    If features are computed by ComputePrincipalCurvature(),
use this settings to get proper pits:
H > 3, K < 0 (curvatures not normalized)
H > 0.2, K < 0 (curvatures normalized)
'''
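    # Hedged examples with the normalized thresholds used below:
    # clouchoux(0.5, -0.1) -> True (pit); clouchoux(0.1, -0.1) -> False.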
# if (MCurv > 3) and (GCurv < 0):
if (MCurv > 0.2) and (GCurv < 0):
return True
else:
return False
def clouchoux_pits(Vertexes, MCurv, GCurv):
'''Extract pits using Clouchoux's definition
'''
Pits = []
for i in xrange(len(Vertexes)):
if clouchoux(MCurv[i], GCurv[i]):
Pits.append(i)
print len(Pits), "Pits found"
return Pits
def getBasin_and_Pits(Maps, Mesh, SulciVTK, PitsVTK, SulciThld = 0, PitsThld = 0, Quick=False, Clouchoux=False, SulciMap='depth'):
'''Extracting basin and pits (either local minimum approach or Clouchoux's)
Parameters
=============
Maps: dictionary
Keys are map names, e.g., depth or curvatures.
Values are per-vertex maps, e.g., curvature map.
Mesh: 2-tuple of lists
the first list has coordinates of vertexes while the second defines triangles on the mesh
This is a mandatory surface, normally a non-inflated surface.
SulciThld: float
the value to threshold the surface to separate sulci and gyri
PitsThld: float
vertexes deeper than this value can be considered as pits
Quick: Boolean
If true, extract sulci only (no component ID, only thresholding), skipping pits and later fundi.
Clouchoux: Boolean
If true, extract pits using Clouchoux's definition. O/w, local minimum approach.
SulciMap: string
The map to be used to get sulci
by default, 'depth'
'''
def write_surface_with_LUTs(File, Points, Faces, Maps):
"""Like write_scalars in io_vtk but no writing of vertices
"""
print "writing sulci into VTK file:", File
Fp = open(File,'w')
io_vtk.write_header(Fp)
io_vtk.write_points(Fp, Points)
io_vtk.write_faces(Fp, Faces)
if len(Maps) > 0:
# Make sure that LUTs is a list of lists
Count = 0
for LUT_name, LUT in Maps.iteritems():
if Count == 0 :
io_vtk.write_scalars(Fp, LUT, LUT_name)
else:
io_vtk.write_scalars(Fp, LUT, LUT_name, begin_scalars=False)
Count += 1
Fp.close()
return None
def write_pits_without_LUTs(File, Points, Indexes):
"""Like write_scalars in io_vtk but no writing of vertices
"""
print "writing pits into VTK file:", File
Fp = open(File,'w')
io_vtk.write_header(Fp)
io_vtk.write_points(Fp, Points)
io_vtk.write_vertices(Fp, Indexes)
Fp.close()
return None
print "\t thresholding the surface using threshold = ", SulciThld
[Vertexes, Faces] = Mesh
MapBasin = Maps[SulciMap]
Basin, Gyri = basin(Faces, Maps[SulciMap], Threshold = SulciThld)
if not Quick:
LastSlash = len(SulciVTK) - SulciVTK[::-1].find('/')
Hemi = SulciVTK[:SulciVTK[LastSlash:].find('.')+LastSlash]# path up to which hemisphere, e.g., /home/data/lh
VrtxNbr = vrtxNbrLst(len(Vertexes), Faces, Hemi)
FcNbr = fcNbrLst(Faces, Hemi)
FcCmpnt, VrtxCmpnt = compnent(Faces, Basin, FcNbr, ".".join([Hemi, SulciMap, str(SulciThld)]))
CmpntLUT = [-1 for i in xrange(len(MapBasin))]
for CmpntID, Cmpnt in enumerate(VrtxCmpnt):
for Vrtx in Cmpnt:
CmpntLUT[Vrtx] = CmpntID
Maps['CmpntID'] = CmpntLUT
if Clouchoux:
Pits = clouchoux_pits(Vertexes, Maps['meancurv'], Maps['gausscurv'])
else: # local minimum approach
MapPits = Maps[SulciMap] # Users will get the option to select pits extraction map in the future.
Pits, Parts, Child = univariate_pits(MapPits, VrtxNbr, VrtxCmpnt, PitsThld)
Maps['hierarchy'] = Parts
else:
print "\t\t Thresholding the surface to get sulci only."
    Faces = [map(int,i) for i in Faces]# this is a temporary fix. It won't cause precision problems because sys.maxint is ~9*10^18.
Vertexes = map(list, Vertexes)
write_surface_with_LUTs(SulciVTK, Vertexes, [Faces[i] for i in Basin], Maps)
if Quick:
sys.exit()
write_pits_without_LUTs(PitsVTK, Vertexes, Pits)
# output tree hierarchies of basal components
# print "writing hierarchies of basal components"
# WetFile = PrefixExtract + '.pits.hier'
# WetP = open(WetFile,'w')
# for LowComp, HighComp in Child.iteritems():
# WetP.write(str(LowComp) + '\t' + str(HighComp) + '\n')
# WetP.close()
# end of output tree hierarchies of basal components
# End of Get pits Forrest 2011-05-30 10:16
# a monolithic code output each component
# Dic = {}
# for CID, Cmpnt in enumerate(FcCmpnt):
# Dic[CID] = len(Cmpnt)
#
# #Dic = sorted(Dic.iteritems(), key= lambda (k,v,) : (v,k))
# Counter = 1
# for CID, Size in sorted(Dic.iteritems(), key=lambda (k,v): (v,k)):
## print Size
# Rank = len(FcCmpnt) - Counter +1
# Fp = open(BasinFile + '.' + SurfFile[-1*SurfFile[::-1].find('.'):] + '.' + str(Rank) +'-th.vtk','w')
# Vertex, Face = fileio.readSurf(SurfFile)
# FundiList = FcCmpnt[CID]
# libvtk.wrtFcFtr(Fp, Vertex, Face, FundiList)
# Fp.close()
# Counter += 1
# a monolithic code output each component
#---------------End of function definitions---------------------------------------------------------------
|
binarybottle/mindboggle_sidelined
|
fundi_from_pits/libbasin.py
|
Python
|
apache-2.0
| 18,234
|
[
"VTK"
] |
b8a1ac9f3104b48f3a72970362f7850910bbe64f51674829c968ad4d4ea4f6af
|
import ddapp.applogic as app
from ddapp import lcmUtils
from ddapp import transformUtils
from ddapp import visualization as vis
from ddapp import filterUtils
from ddapp import drcargs
from ddapp.shallowCopy import shallowCopy
from ddapp.timercallback import TimerCallback
from ddapp import vtkNumpy
from ddapp import objectmodel as om
import ddapp.vtkAll as vtk
from ddapp.debugVis import DebugData
import PythonQt
from PythonQt import QtCore, QtGui
import bot_core as lcmbotcore
import numpy as np
from ddapp.simpletimer import SimpleTimer
from ddapp import ioUtils
import sys
import drc as lcmdrc
from ddapp.consoleapp import ConsoleApp
class PointCloudItem(om.ObjectModelItem):
def __init__(self, model):
om.ObjectModelItem.__init__(self, 'PointCloud', om.Icons.Eye)
self.model = model
self.scalarBarWidget = None
self.addProperty('Color By', 0,
attributes=om.PropertyAttributes(enumNames=['Solid Color']))
self.addProperty('Updates Enabled', True)
self.addProperty('Framerate', model.targetFps,
attributes=om.PropertyAttributes(decimals=0, minimum=1.0, maximum=30.0, singleStep=1, hidden=False))
self.addProperty('Visible', model.visible)
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Updates Enabled':
if self.getProperty('Updates Enabled'):
self.model.start()
else:
self.model.stop()
elif propertyName == 'Visible':
self.model.setVisible(self.getProperty(propertyName))
elif propertyName == 'Framerate':
self.model.setFPS(self.getProperty('Framerate'))
elif propertyName == 'Color By':
self._updateColorBy()
self.model.polyDataObj._renderAllViews()
def _updateColorBy(self):
arrayMap = {
0 : 'Solid Color'
}
colorBy = self.getProperty('Color By')
arrayName = arrayMap.get(colorBy)
self.model.polyDataObj.setProperty('Color By', arrayName)
class PointCloudSource(TimerCallback):
def __init__(self, view, _PointCloudQueue):
self.view = view
self.PointCloudQueue = _PointCloudQueue
self.visible = True
self.p = vtk.vtkPolyData()
        utime = self.PointCloudQueue.getPointCloudFromPointCloud(self.p)  # use the queue passed to the constructor
self.polyDataObj = vis.PolyDataItem('pointcloud source', shallowCopy(self.p), view)
self.polyDataObj.actor.SetPickable(1)
self.polyDataObj.initialized = False
om.addToObjectModel(self.polyDataObj)
self.queue = PythonQt.dd.ddBotImageQueue(lcmUtils.getGlobalLCMThread())
self.queue.init(lcmUtils.getGlobalLCMThread(), drcargs.args().config_file)
self.targetFps = 30
self.timerCallback = TimerCallback(targetFps=self.targetFps)
self.timerCallback.callback = self._updateSource
#self.timerCallback.start()
def start(self):
self.timerCallback.start()
def stop(self):
self.timerCallback.stop()
def setFPS(self, framerate):
self.targetFps = framerate
self.timerCallback.stop()
self.timerCallback.targetFps = framerate
self.timerCallback.start()
def setVisible(self, visible):
self.polyDataObj.setProperty('Visible', visible)
def _updateSource(self):
p = vtk.vtkPolyData()
utime = self.PointCloudQueue.getPointCloudFromPointCloud(p)
if not p.GetNumberOfPoints():
return
sensorToLocalFused = vtk.vtkTransform()
self.queue.getTransform('VELODYNE', 'local', utime, sensorToLocalFused)
p = filterUtils.transformPolyData(p,sensorToLocalFused)
self.polyDataObj.setPolyData(p)
if not self.polyDataObj.initialized:
self.polyDataObj.initialized = True
def init(view):
global PointCloudQueue, _pointcloudItem, _pointcloudSource
PointCloudQueue = PythonQt.dd.ddPointCloudLCM(lcmUtils.getGlobalLCMThread())
PointCloudQueue.init(lcmUtils.getGlobalLCMThread(), drcargs.args().config_file)
_pointcloudSource = PointCloudSource(view, PointCloudQueue)
_pointcloudSource.start()
sensorsFolder = om.getOrCreateContainer('sensors')
_pointcloudItem = PointCloudItem(_pointcloudSource)
om.addToObjectModel(_pointcloudItem, sensorsFolder)
def startButton():
view = app.getCurrentRenderView()
init(view)
_pointcloudSource.start()
app.addToolbarMacro('start live pointcloud', startButton)
|
gizatt/director
|
src/python/ddapp/pointcloudlcm.py
|
Python
|
bsd-3-clause
| 4,666
|
[
"VTK"
] |
fa0c646377338a455f1d36b65b5c24c0cc0df68a3670c71573199f3d8559a248
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import unittest as ut
import unittest_decorators as utx
import numpy as np
import numpy.testing
import espressomd
from espressomd import lb
@utx.skipIfMissingGPU()
class TestLBGetUAtPos(ut.TestCase):
"""
    Check that velocities at particle positions are sorted by ``id`` and
    quantitatively correct (only LB GPU).
"""
@classmethod
def setUpClass(cls):
cls.params = {
'tau': 0.01,
'agrid': 0.5,
'box_l': [12.0, 12.0, 12.0],
'dens': 0.85,
'viscosity': 30.0,
'friction': 2.0,
'gamma': 1.5
}
cls.system = espressomd.System(box_l=[1.0, 1.0, 1.0])
cls.system.box_l = cls.params['box_l']
cls.system.cell_system.skin = 0.4
cls.system.time_step = 0.01
cls.n_nodes_per_dim = int(cls.system.box_l[0] / cls.params['agrid'])
for p in range(cls.n_nodes_per_dim):
# Set particles exactly between two LB nodes in x direction.
cls.system.part.add(pos=[(p + 1) * cls.params['agrid'],
0.5 * cls.params['agrid'],
0.5 * cls.params['agrid']])
cls.lb_fluid = lb.LBFluidGPU(
visc=cls.params['viscosity'],
dens=cls.params['dens'],
agrid=cls.params['agrid'],
tau=cls.params['tau'],
)
cls.system.actors.add(cls.lb_fluid)
cls.vels = np.zeros((cls.n_nodes_per_dim, 3))
cls.vels[:, 0] = np.arange(cls.n_nodes_per_dim, dtype=float)
cls.interpolated_vels = cls.vels.copy()
cls.interpolated_vels[:, 0] += 0.5
for n in range(cls.n_nodes_per_dim):
cls.lb_fluid[n, 0, 0].velocity = cls.vels[n, :]
cls.system.integrator.run(0)
def test_get_u_at_pos(self):
"""
Test if linear interpolated velocities are equal to the velocities at
the particle positions. This test uses the two-point coupling under
the hood.
"""
numpy.testing.assert_allclose(
self.interpolated_vels[:-1],
self.lb_fluid.get_interpolated_fluid_velocity_at_positions(
self.system.part[:].pos, False)[:-1],
atol=1e-4)
if __name__ == "__main__":
suite = ut.TestSuite()
suite.addTests(ut.TestLoader().loadTestsFromTestCase(TestLBGetUAtPos))
result = ut.TextTestRunner(verbosity=4).run(suite)
sys.exit(not result.wasSuccessful())
|
fweik/espresso
|
testsuite/python/lb_get_u_at_pos.py
|
Python
|
gpl-3.0
| 3,196
|
[
"ESPResSo"
] |
a62f28683b478b5e8decc9f49ad629ade589c80c450e01399538a3ae6e537772
|
# coding: utf-8
# In[1]:
# opengrid imports
from opengrid.library import misc, houseprint, caching
from opengrid.library.analysis import DailyAgg
from opengrid import config
c=config.Config()
# other imports
import pandas as pd
import charts
import numpy as np
import os
import datetime as dt
import pytz
BXL = pytz.timezone('Europe/Brussels')
# configuration for the plots
DEV = c.get('env', 'type') == 'dev' # DEV is True if we are in development environment, False if on the droplet
print("Environment configured for development: {}".format(DEV))
if not DEV:
# production environment: don't try to display plots
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.dates import MinuteLocator, HourLocator, DateFormatter, AutoDateLocator, num2date
if DEV:
if c.get('env', 'plots') == 'inline':
get_ipython().magic(u'matplotlib inline')
else:
get_ipython().magic(u'matplotlib qt')
else:
pass # don't try to render plots
plt.rcParams['figure.figsize'] = 12,8
# In[2]:
# Load houseprint from cache if possible, otherwise build it from source
try:
hp_filename = os.path.join(c.get('data', 'folder'), 'hp_anonymous.pkl')
hp = houseprint.load_houseprint_from_file(hp_filename)
print("Houseprint loaded from {}".format(hp_filename))
except Exception as e:
print(e)
print("Because of this error we try to build the houseprint from source")
hp = houseprint.Houseprint()
sensors = hp.get_sensors(sensortype='electricity') # sensor objects
# Remove some sensors
exclude = [
'565de0a7dc64d8370aa321491217b85f' # 3E
]
solar = [x.key for x in hp.search_sensors(type='electricity', system='solar')]
exclude += solar
# filter instead of list.remove(): mutating a list while iterating over it skips elements
sensors = [s for s in sensors if s.key not in exclude]
hp.init_tmpo()
# The first time, this will take a very looong time to get all the detailed data for building the cache
# Afterwards, this is quick
starttime = dt.time(0, tzinfo=BXL)
endtime = dt.time(5, tzinfo=BXL)
caching.cache_results(hp=hp, sensors=sensors, resultname='elec_min_night_0-5', AnalysisClass=DailyAgg,
agg='min', starttime=starttime, endtime=endtime)
caching.cache_results(hp=hp, sensors=sensors, resultname='elec_max_night_0-5', AnalysisClass=DailyAgg,
agg='max', starttime=starttime, endtime=endtime)
# In[ ]:
cache_min = caching.Cache(variable='elec_min_night_0-5')
cache_max = caching.Cache(variable='elec_max_night_0-5')
dfdaymin = cache_min.get(sensors=sensors)
dfdaymax = cache_max.get(sensors=sensors)
dfdaymin.info()
# The next plot shows that some periods are missing. Due to the cumulative nature of the electricity counter, we still have the total consumption. However, it is spread out over the entire period. So we don't know the standby power during these days, and we have to remove those days.
# In[ ]:
if DEV:
sensor = hp.search_sensors(key='3aa4')[0]
df = sensor.get_data(head=pd.Timestamp('20151117'), tail=pd.Timestamp('20160104'))
charts.plot(df, stock=True, show='inline')
# In[ ]:
def is_submeter(sensor, dfdaymin, dfdaymax):
"""
Return True if this sensor is a sub-meter
sensor = sensor object
"""
other_sensors = sensor.device.get_sensors(sensortype='electricity')
other_sensors.remove(sensor)
if len(other_sensors) == 0:
print("\n{} - {}: no other sensors, this must be main.".format(sensor.device.key, sensor.description))
return False
else:
print("\n{} - {}: comparing with:".format(sensor.device.key, sensor.description))
for o in other_sensors:
# we only check the values for last day
print("* {}:".format(o.description))
sensormin = dfdaymin.ix[-1,sensor.key]
sensormax = dfdaymax.ix[-1,sensor.key]
try:
othermin = dfdaymin.ix[-1].dropna()[o.key]
othermax = dfdaymax.ix[-1].dropna()[o.key]
            except KeyError:  # the other sensor has no data for the last day
                print("  No data found for sensor {}".format(o.description))
else:
if (sensormin <= othermin) and (sensormax <= othermax):
print(" {} has lower daily min AND max, so it is a submeter.".format(sensor.description))
return True
else:
print(" {} has higher daily min and/or max, we look further.".format(sensor.description))
else:
print("All other sensors have no data OR lower daily min and max. {} must be main.".format(sensor.description))
return False
# In[ ]:
# The function is_submeter makes one obvious error: see results for FL03001566
for col in dfdaymin:
is_submeter(hp.find_sensor(col), dfdaymin, dfdaymax)
# In[ ]:
# Clean out the data:
# First remove sensors that are submeters
for col in dfdaymin:
if is_submeter(hp.find_sensor(col), dfdaymin, dfdaymax):
print("\n!!Removing submeter {}".format(col))
dfdaymin = dfdaymin.drop(col, axis=1)
# Now remove days with too low values to be realistic
dfdaymin[dfdaymin < 10] = np.nan
# Now remove days where the minimum=maximum (within 1 Watt difference)
dfdaymin[(dfdaymax - dfdaymin) < 1] = np.nan
# In[ ]:
dfdaymin.info()
# In[ ]:
if DEV:
charts.plot(dfdaymin, stock=True, show='inline')
# In[ ]:
standby_statistics = dfdaymin.T.describe(percentiles=[0.1,0.5,0.9]).T
# In[ ]:
if DEV:
charts.plot(standby_statistics[['10%', '50%', '90%']], stock=True, show='inline')
# In[ ]:
# Get detailed profiles for the last day
now = pd.Timestamp('now', tz=BXL)
dt_start_of_last_day = pd.Timestamp(dfdaymin.index[-1].date(), tz=BXL)
dt_end_of_last_day = dt_start_of_last_day + pd.Timedelta(hours=endtime.hour, minutes=endtime.minute)
sensors = map(hp.find_sensor, dfdaymin.columns)
df_details = hp.get_data(sensors = sensors, head=dt_start_of_last_day, tail=dt_end_of_last_day)
df_details.fillna(method='ffill', inplace=True)
df_details.fillna(method='bfill', inplace=True)
# ### Boxplot approach. Possible for a period of maximum +/- 2 weeks.
# In[ ]:
# choose a period
look_back_days = 10
dt_start_of_period = dt_start_of_last_day - pd.Timedelta(days=look_back_days-1)
dfdaymin_period = dfdaymin.ix[dt_start_of_period:].dropna(axis=1, how='all')
# In[ ]:
box = [dfdaymin_period.loc[i,:].dropna().values for i in dfdaymin_period.index]
for sensor in dfdaymin_period.columns:
fig=plt.figure(figsize=(10,5))
ax1=plt.subplot(121)
ax1.boxplot(box, positions=range(len(box)), notch=False)
ax1.plot(range(len(box)), dfdaymin_period[sensor], 'rD', ms=10, label='Standby power')
xticks = [x.strftime(format='%d/%m') for x in dfdaymin_period.index]
plt.xticks(range(len(box)), xticks, rotation='vertical')
plt.title(hp.find_sensor(sensor).device.key + ' - ' + hp.find_sensor(sensor).description)
ax1.grid()
ax1.set_ylabel('Watt')
plt.legend(numpoints=1, frameon=False)
ax2=plt.subplot(122)
try:
ax2.plot_date(df_details[sensor].index, df_details[sensor].values, 'b-', label='Last night')
ax2.xaxis_date(tz=BXL) #Put timeseries plot in local time
# rotate the labels
plt.xticks(rotation='vertical')
ax2.set_ylabel('Watt')
ax2.set_xlabel('Local time (BXL)')
ax2.grid()
xax = ax2.get_xaxis() # get the x-axis
xax.set_major_locator(HourLocator())
xax.set_minor_locator(MinuteLocator(30))
        adf = xax.get_major_formatter() # get the auto-formatter
adf.scaled[1./24] = '%H:%M' # set the < 1d scale to H:M
adf.scaled[1.0] = '%Y-%m-%d' # set the > 1d < 1m scale to Y-m-d
adf.scaled[30.] = '%Y-%m' # set the > 1m < 1Y scale to Y-m
adf.scaled[365.] = '%Y' # set the > 1y scale to Y
plt.legend(loc='upper right', frameon=False)
plt.tight_layout()
except Exception as e:
print(e)
else:
plt.savefig(os.path.join(c.get('data', 'folder'), 'figures', 'standby_horizontal_'+sensor+'.png'), dpi=100)
pass
if not DEV:
plt.close()
# ### Percentile approach. Useful for longer time periods, but tweaking of graph still needed
# In[ ]:
# choose a period
look_back_days = 40
dt_start_of_period = dt_start_of_last_day - pd.Timedelta(days=look_back_days-1)
dfdaymin_period = dfdaymin.ix[dt_start_of_period:].dropna(axis=1, how='all')
df = dfdaymin_period.join(standby_statistics[['10%', '50%', '90%']], how='left')
# In[ ]:
for sensor in dfdaymin_period.columns:
plt.figure(figsize=(10,8))
ax1=plt.subplot(211)
ax1.plot_date(df.index, df[u'10%'], '-', lw=2, color='g', label=u'10% percentile')
ax1.plot_date(df.index, df[u'50%'], '-', lw=2, color='orange', label=u'50% percentile')
ax1.plot_date(df.index, df[u'90%'], '-', lw=2, color='r', label=u'90% percentile')
ax1.plot_date(df.index, df[sensor], 'rD', ms=7, label='Your standby power')
ax1.legend()
    locs, labels = plt.xticks()
xticks = [x.strftime(format='%d/%m') for x in num2date(locs)]
plt.xticks(locs, xticks, rotation='vertical')
plt.title(hp.find_sensor(sensor).device.key + ' - ' + sensor)
ax1.grid()
ax1.set_ylabel('Watt')
ax2=plt.subplot(212)
try:
ax2.plot_date(df_details[sensor].index, df_details[sensor].values, 'b-', label='Detailed consumption of last night')
ax2.xaxis_date(tz=BXL) #Put timeseries plot in local time
# rotate the labels
plt.xticks(rotation='vertical')
ax2.set_ylabel('Watt')
ax2.set_xlabel('Local time (BXL)')
ax2.grid()
xax = ax2.get_xaxis() # get the x-axis
xax.set_major_locator(HourLocator())
xax.set_minor_locator(MinuteLocator(30))
        adf = xax.get_major_formatter() # get the auto-formatter
adf.scaled[1./24] = '%H:%M' # set the < 1d scale to H:M
adf.scaled[1.0] = '%Y-%m-%d' # set the > 1d < 1m scale to Y-m-d
adf.scaled[30.] = '%Y-%m' # set the > 1m < 1Y scale to Y-m
adf.scaled[365.] = '%Y' # set the > 1y scale to Y
plt.legend(loc='upper right', frameon=False)
plt.tight_layout()
except Exception as e:
print(e)
else:
plt.savefig(os.path.join(c.get('data', 'folder'), 'figures', 'standby_vertical_'+sensor+'.png'), dpi=100)
pass
if not DEV:
plt.close()
# In[ ]:
|
JrtPec/opengrid
|
opengrid/recipes/electricity_standby.py
|
Python
|
apache-2.0
| 10,486
|
[
"ADF"
] |
7e0383bfe72f135afb92303fdb4155d8f3067a8bd76f699122c42e001881d766
|
#! /usr/bin/python
"""
Collection of functions and classes to parse and filter hit tables from:
blast
hmmer
last
diamond
infernal
SAM
GFF
"""
import logging
import re
import sys
from edl.util import InputFile, parseExp
logger = logging.getLogger(__name__)
#############
# Constants #
#############
GENE = 'gene'
LIZ = 'liz'
YANMEI = 'yanmei'
BLASTPLUS = 'blast'
FRHIT = 'frhit'
LAST0 = 'last'
HMMSCANDOM = 'hmmscandom'
HMMSEARCHDOM = 'hmmsearchdom'
HMMSCAN = 'hmmscan'
HMMSEARCH = 'hmmsearch'
CMSEARCH = 'cmsearch'
CMSCAN = 'cmscan'
SAM = 'sam'
GFF = 'gff'
PAF = 'paf'
formatsWithNoDescription = [LAST0, FRHIT, BLASTPLUS, PAF, SAM]
cigarRE = re.compile(r'\d+[^\d]')
#############
# Classes #
#############
class FilterParams:
@staticmethod
def create_from_arguments(arguments, ignore=[], translate={}):
"""
Translate a NameSpace object created using the
add_hit_table_arguments below into a FilterParams object.
The attributes format, sort are expected
to be in the arguments object with the 'hitTable' prefix.
The attributes bits, evalue, pctid, aln, length, hits_per_read,
hsps_per_hit, and nonoverlapping with the prefix 'filter'.
For example, arguments.hitTableFormat will be copied to
params.format and arguments.filter_top_pct to params.top_pct.
translate= should be set to a dictionary mapping attributes
        of the arguments object to the desired attributes of the
FilterParams object.
ignore= should be a list of the standard params to be skipped
(eg: filter_top_pct).
"""
params = FilterParams()
# check the nonstandard one(s)
for (oname, pname) in translate.items():
if hasattr(arguments, oname):
setattr(params, pname, getattr(arguments, oname))
# get the standard ones
for param in [
'bits',
'evalue',
'pctid',
'length',
'aln',
'hits_per_read',
'hsps_per_hit',
'nonoverlapping',
'top_pct']:
oparam = 'filter_' + param
if oparam not in ignore:
if hasattr(arguments, oparam):
setattr(params, param, getattr(arguments, oparam))
for param in ['format', 'sort']:
oparam = 'hitTable' + param[0].upper() + param[1:]
if oparam not in ignore:
if hasattr(arguments, oparam):
setattr(params, param, getattr(arguments, oparam))
logging.debug("%r" % (arguments))
logging.debug("%r" % (params))
return params
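    # Hedged usage sketch (argparse names and values are hypothetical):
    # given an arguments namespace with hitTableFormat='blast' and
    # filter_bits=40.0,
    #     params = FilterParams.create_from_arguments(arguments)
    # would yield params.format == 'blast' and params.bits == 40.0.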
def __init__(
self,
format=GENE,
top_pct=-1,
bits=0.0,
evalue=None,
pctid=0.0,
length=0,
aln=None,
hits_per_read=0,
hsps_per_hit=0,
nonoverlapping=-1,
sort=None,
bad_refs=None):
self.format = format
self.top_pct = top_pct
self.bits = bits
self.evalue = evalue
self.pctid = pctid
self.length = length
self.aln = aln
self.hits_per_read = hits_per_read
self.hsps_per_hit = hsps_per_hit
self.nonoverlapping = nonoverlapping
self.sort = sort
self.bad_refs = None
def set_nonoverlapping(self, value):
self.nonoverlapping = value
def __repr__(self):
return ("FilterParams(format=%r, top_pct=%r, bits=%r, evalue=%r, "
"pctid=%r, length=%r, aln=%r, hits_per_read=%r, "
"hsps_per_hit=%r, nonoverlapping=%r sort=%r)" % (
self.format, self.top_pct, self.bits, self.evalue,
self.pctid, self.length, self.aln, self.hits_per_read,
self.hsps_per_hit, self.nonoverlapping, self.sort,
))
"""
def __setattr__(self, name, value):
if name == 'pctid' and value > 1:
logger.warning("requested PCTID filter is grater than 1, "
"so we're scaling it down by 100x")
value = value / 100
super().__setattr__(name, value)
"""
class EmptyHitException(Exception):
pass
class Hit:
"""
Object representing a single hit from a search program like blast
or fr-hit
"""
@staticmethod
def getHit(line, options):
try:
return Hit(line, options)
except EmptyHitException:
return None
except Exception:
logger.warn("Error parsing line:\n%s" % (line))
raise
def __init__(self, line, options):
"""
Creates Hit object from a line in a hit table. Options can
be any class with a 'format' property or a string. The string
value of options or options.format must be one of the recognized
formats: 'gene','liz','yanmei','last','frhit'
"""
self.line = line
self.setFormat(options)
self.parseLine(line)
def __repr__(self):
return "Hit(%r,%r)" % (self.line.rstrip('\n\r'), self.format)
def setFormat(self, options):
if isinstance(options, type(GENE)):
# just to make this work if instantiated from repr
self.format = options
else:
self.format = options.format
if self.format == GENE:
self.parseLine = self.parseGeneLine
elif self.format == LIZ:
self.parseLine = self.parseLizLine
elif self.format == YANMEI:
self.parseLine = self.parseYanmeiLine
elif self.format == BLASTPLUS:
self.parseLine = self.parseBlastPlusLine
elif self.format == LAST0:
self.parseLine = self.parseLastalLine
elif self.format == FRHIT:
self.parseLine = self.parseFrHitLine
elif self.format == HMMSEARCHDOM:
self.parseLine = self.parseHmmSearchDomLine
elif self.format == HMMSCANDOM:
self.parseLine = self.parseHmmScanDomLine
elif self.format == HMMSEARCH:
self.parseLine = self.parseHmmSearchLine
elif self.format == HMMSCAN:
self.parseLine = self.parseHmmScanLine
elif self.format == CMSEARCH:
self.parseLine = self.parseCmSearchLine
elif self.format == CMSCAN:
self.parseLine = self.parseCmScanLine
elif self.format == SAM:
self.parseLine = self.parseSamLine
elif self.format == PAF:
self.parseLine = self.parsePafLine
elif self.format == GFF:
self.parseLine = self.parseGFFLine
            self.to_gff = lambda: self.line  # already GFF; zero-arg closure, since instance attributes are not bound like methods
else:
sys.exit("Unknown format: %s" % (self.format))
def parseGeneLine(self, line):
cells = line.rstrip('\n\r').split('\t')
self.read = cells[0]
self.readDesc = cells[1]
self.hit = cells[2]
self.hitDesc = cells[3]
self.pctid = float(cells[4])
self.mlen = int(cells[5])
self.qstart = int(cells[6])
self.qend = int(cells[7])
self.hstart = int(cells[8])
self.hend = int(cells[9])
self.score = float(cells[10])
self.evalue = parseExp(cells[11])
self.aln = float(cells[12])
def parseLizLine(self, line):
cells = line.rstrip('\n\r').split('\t')
self.read = cells[0]
self.hit = cells[1]
self.hitDesc = cells[2]
try:
self.pctid = float(cells[3])
except ValueError:
# leave unset if it's not a float
pass
self.mlen = int(cells[4])
self.qstart = int(cells[5])
self.qend = int(cells[6])
self.hstart = int(cells[7])
self.hend = int(cells[8])
self.score = float(cells[9])
self.evalue = parseExp(cells[10])
self.aln = float(cells[11])
def parsePafLine(self, line):
"""
PAF_COLUMNS = ['query','qlen','qstart','qend','strand',
'hit', 'hlen','hstart','hend','matches',
'mlen','mapqv']
"""
cells = line.rstrip('\n\r').split('\t')
self.read = cells[0]
self.qlen = int(cells[1])
self.qstart = int(cells[2])
self.qend = int(cells[3])
if cells[4] == "-":
self.qstart, self.qend = self.qend, self.qstart
self.hit = cells[5]
self.hlen = int(cells[6])
self.hstart = int(cells[7])
self.hend = int(cells[8])
self.matches = int(cells[9])
self.mlen = int(cells[10])
self.pctid = 100 * self.matches / self.mlen
self.aln = self.mlen / self.qlen
self.score = None
def parseSamLine(self, line):
if line[0] == '@':
raise EmptyHitException("reference sequence line")
cells = line.rstrip('\n\r').split('\t')
self.read = cells[0]
self.hit = cells[2]
if self.hit == '*':
raise EmptyHitException("No match")
self.cigar = cells[5]
(alen, alenh, alenq, qstart, qend, pctid) = \
parseCigarString(self.cigar)
self.mlen = alen
self.qstart, self.qend = sam_q_order(qstart, qend, int(cells[1]))
self.hstart = int(cells[3])
self.hend = self.hstart + alenh - 1
self.qlen = len(cells[9])
self.aln = float(self.mlen) / float(self.qlen)
self.score = None
for tagstr in cells[11:]:
if tagstr.startswith('AS'):
self.score = float(tagstr.split(":")[2])
if tagstr.startswith('MD:Z:'):
if pctid == 0:
pctid = get_alignment_percent_identity(tagstr[4:])
if tagstr.startswith('ZW:f:') and self.score is None:
                # kallisto will put the probability here
self.score = float(tagstr[5:])
if self.score is None:
raise Exception("No score (AS tag) found in line:\n%s" % (line))
if pctid != 0:
self.pctid = pctid
# target name accession tlen query name accession
# qlen E-value score bias # of c-Evalue i-Evalue score bias
# from to from to from to acc description of target
def parseHmmScanDomLine(self, line):
if line[0] == '#':
raise EmptyHitException("Comment line")
cells = line.rstrip('\n\r').split()
self.read = cells[3]
qlen = int(cells[5])
self.hit = cells[0]
# self.hlen = int(cells[2])
self.evalue = float(cells[6])
self.score = float(cells[7])
self.hstart = int(cells[15])
self.hend = int(cells[16])
self.qstart = int(cells[17])
self.qend = int(cells[18])
self.hitDesc = cells[22]
self.mlen = 1 + self.qend - self.qstart
self.aln = self.mlen / float(qlen)
# target name accession query name accession E-value
# score bias ue score bias exp reg clu ov env dom rep inc
# description of target
def parseHmmScanLine(self, line):
if line[0] == '#':
raise EmptyHitException("Comment line")
cells = line.rstrip('\n\r').split()
self.read = cells[2]
self.hit = cells[0]
self.evalue = float(cells[4])
self.score = float(cells[5])
self.hitDesc = cells[18]
# target name accession tlen query name accession
# qlen E-value score bias # of c-Evalue i-Evalue score bias
# from to from to from to acc description of target
def parseHmmSearchDomLine(self, line):
if line[0] == '#':
raise EmptyHitException("Comment line")
cells = line.rstrip('\n\r').split()
self.read = cells[0]
qlen = int(cells[2])
self.hit = cells[3]
self.hitDesc = cells[4]
# self.hlen = int(cells[5])
self.evalue = float(cells[6])
self.score = float(cells[7])
self.hstart = int(cells[15])
self.hend = int(cells[16])
self.qstart = int(cells[17])
self.qend = int(cells[18])
self.readDesc = cells[22]
self.mlen = 1 + self.qend - self.qstart
self.aln = self.mlen / float(qlen)
# target name accession query name accession E-value
# score bias ue score bias exp reg clu ov env dom rep inc
# description of target
def parseHmmSearchLine(self, line):
if line[0] == '#':
raise EmptyHitException("Comment line")
cells = line.rstrip('\n\r').split()
self.read = cells[0]
self.hit = cells[2]
self.evalue = float(cells[4])
self.score = float(cells[5])
self.readDesc = cells[18]
# target name accession query name accession mdl mdl
# from mdl to seq from seq to strand trunc pass gc bias score
# E-value inc description of target
def parseCmSearchLine(self, line):
if line[0] == '#':
raise EmptyHitException("Comment line")
cells = line.rstrip('\n\r').split()
self.read = cells[0]
self.hit = cells[3]
self.hitDesc = cells[2]
self.hstart = int(cells[5])
self.hend = int(cells[6])
self.qstart = int(cells[7])
self.qend = int(cells[8])
self.strand = cells[9]
self.evalue = float(cells[15])
self.score = float(cells[14])
self.readDesc = cells[17]
self.mlen = self.hend - self.hstart + 1
# target name accession query name accession mdl mdl
# from mdl to seq from seq to strand trunc pass gc bias score
# E-value inc description of target
def parseCmScanLine(self, line):
if line[0] == '#':
raise EmptyHitException("Comment line")
cells = line.rstrip('\n\r').split()
self.read = cells[2]
self.hit = cells[1]
self.hitDesc = cells[0]
self.hstart = int(cells[5])
self.hend = int(cells[6])
self.qstart = int(cells[7])
self.qend = int(cells[8])
self.strand = cells[9]
self.evalue = float(cells[15])
self.score = float(cells[14])
self.readDesc = cells[17]
self.mlen = self.hend - self.hstart + 1
# score name1 start1 alnSize1 strand1 seqSize1 name2
# start2 alnSize2 strand2 seqSize2 blocks
def parseLastalLine(self, line):
# logger.debug(line)
if line[0] == '#':
raise EmptyHitException("Comment line")
cells = line.rstrip('\n\r').split('\t')
self.score = float(cells[0])
self.hit = cells[1]
hmlen = int(cells[3])
hlen = int(cells[5])
self.hlen = hlen
if cells[4] == '+':
self.hstart = int(cells[2]) + 1
self.hend = self.hstart + hmlen - 1
else:
# start was in reverse strand
self.hstart = hlen - int(cells[2])
self.hend = self.hstart - hmlen + 1
self.read = cells[6]
qmlen = int(cells[8])
qlen = int(cells[10])
self.qlen = qlen
if cells[9] == '+':
self.qstart = int(cells[7]) + 1
self.qend = self.qstart + qmlen - 1
else:
# start was in reverse strand
self.qstart = qlen - int(cells[7])
self.qend = self.qstart - qmlen + 1
self.aln = qmlen / float(qlen)
if cells[9] == cells[4]:
self.strand = "+"
else:
self.strand = "-"
# some versions have evalues in the last few spots (eg: E=2.1e-09)
self.evalue = float(cells[13][2:].strip()) if len(cells) > 13 else None
self.mlen = computeLastHitValues(cells[11])
self.hitDesc = None
logger.debug("Span: %d-%d" % (self.qstart, self.qend))
def parseFrHitLine(self, line):
cells = line.rstrip('\n\r').split('\t')
self.read = cells[0]
self.evalue = parseExp(cells[2])
self.score = None
self.mlen = cells[3]
self.qstart = int(cells[4])
self.qend = int(cells[5])
self.pctid = float(cells[7])
self.hit = cells[8]
self.hstart = int(cells[9])
self.hend = int(cells[10])
def parseBlastPlusLine(self, line):
if line[0] == '#':
raise EmptyHitException("Comment line")
cells = line.rstrip('\n\r').split('\t')
self.read = cells[0]
self.hit = cells[1]
self.pctid = float(cells[2])
self.mlen = int(cells[3])
self.mismatch = int(cells[4])
self.gaps = int(cells[5])
self.qstart = int(cells[6])
self.qend = int(cells[7])
self.hstart = int(cells[8])
self.hend = int(cells[9])
self.evalue = parseExp(cells[10])
self.score = float(cells[11])
def parseYanmeiLine(self, line):
cells = line.rstrip('\n\r').split('\t')
self.read = cells[0]
self.hit = cells[1]
self.pctid = float(cells[2])
self.mlen = int(cells[3])
self.mismatch = int(cells[4])
self.gaps = int(cells[5])
self.qstart = int(cells[6])
self.qend = int(cells[7])
self.hstart = int(cells[8])
self.hend = int(cells[9])
self.evalue = parseExp(cells[10])
self.score = float(cells[11])
self.hitDesc = cells[12]
def parseGFFLine(self, line):
if line[0] == '#':
raise EmptyHitException("Comment")
cells = line.rstrip('\n\r').split('\t')
self.read = cells[0]
self.source = cells[1]
self.hit_type = cells[2]
self.qstart = int(cells[3])
self.qend = int(cells[4])
self.score = float(cells[5])
self.strand = cells[6]
hit_data = dict([kv.split('=')
for kv in cells[8].strip(';').split(';')])
if "ID" in hit_data:
self.hit = hit_data['ID']
elif "Name" in hit_data:
self.hit = hit_data['Name']
elif "Target" in hit_data:
self.hit = hit_data['Target'].split()[0]
self.hitDesc = hit_data.get('product', cells[8])
self.evalue = self.score
self.mlen = self.qend + 1 - self.qstart
def getAln(self):
try:
return self.aln
except AttributeError:
sys.exit("Cannot calculate alignment percentage from"
"data in this m8 format")
def checkForOverlap(self, regions, buffer):
# make sure start is smaller than end
start = min(self.qstart, self.qend)
end = max(self.qstart, self.qend)
# adjust start/end to account for buffer
buf_st = start + buffer
buf_en = end - buffer
for i, occupiedRange in enumerate(regions):
# hit cannot intersect an used range
if (buf_st >= occupiedRange[1] or buf_en <= occupiedRange[0]):
# does not overlap this range (try next range)
continue
else:
# overlaps. We are done here
return ((start, end), occupiedRange)
return ((start, end), None)
def checkForOverlapAndAdd(self, regions, buffer):
"""
check to see if this hit overlaps any already hit region.
Regions must overlap by at least "buffer" bases to count.
The regions array should be a list of (Start,end) pairs
indicating regions already hit.
"""
logger.debug(
"Looking for [%d,%d] in %s" %
(self.qstart, self.qend, regions))
# compare with all used ranges
hit_span, overlap_region = self.checkForOverlap(regions, buffer)
if overlap_region is not None:
return None
# we get here if there was no overlap
regions.append(hit_span)
return regions
def getLine(self, options):
return self.line
def to_gff(self):
"""
An attempt to export any hit to GFF. Only partially tested
"""
# The core data in a GFF file:
sequence_id = self.read
source = self.format
feature = self.hitDesc
start = self.qstart
end = self.qend
score = self.score
try:
strand = self.strand
except AttributeError:
strand = "+" if start <= end else "-"
# Ignore phase for now
phase = '.'
# create attribute field for attributes we have
attributes = {}
attributes['Target'] = " ".join(
str(v) for v in [
self.hit,
self.hstart,
self.hend])
try:
attributes['Cigar'] = self.cigar
except AttributeError:
pass
attributes = ';'.join(["{0}={1}".format(k, attributes[k])
for k in attributes])
# create line
gff_line = '\t'.join(str(v) for v in
[sequence_id,
source,
feature,
start,
end,
score,
strand,
phase,
attributes]) \
+ '\n'
return gff_line
#############
# Functions #
#############
def sam_q_order(start, end, flags):
"""
    if flag 0x10 (reverse strand) is set, return (end, start)
    """
    reverse = bool(flags & 0x10)  # equivalent to the original bin()-string bit test
return tuple(sorted([start, end], reverse=reverse))
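# Hedged example: sam_q_order(10, 50, 16) -> (50, 10) because SAM flag 0x10
# marks a reverse-strand alignment; sam_q_order(10, 50, 0) -> (10, 50).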
def computeLastHitValues(blocks):
"""
    given the 'blocks' string from a last hit, return the
    match length
    the blocks string looks something like this:
    "73,0:1,15,0:1,13,0:1,9"
    where integer elements indicate lengths of matches and
colon separated elements indicate lengths of gaps
"""
matchLen = 0
for segment in blocks.split(','):
try:
matchLen += int(segment)
except ValueError:
(hml, qml) = segment.split(":")
mml = max(int(hml), int(qml))
matchLen += mml
return matchLen
def getReadCol(format):
logger.info("Getting read col for format: %s" % format)
if format == LAST0:
return 6
else:
return 0
def getHitCol(format, useDesc=False):
logger.info("Getting hit col for format: %s" % format)
if format == GENE:
if useDesc:
hitCol = 3
else:
hitCol = 2
elif format == LAST0:
if useDesc:
raise Exception(
"lastal does not report the hit description, sorry")
else:
hitCol = 1
elif format == LIZ:
if useDesc:
hitCol = 2
else:
hitCol = 1
elif format == YANMEI:
if useDesc:
hitCol = 12
else:
hitCol = 1
elif format == BLASTPLUS:
if useDesc:
raise Exception(
"The default blast table does not keep the hit description")
else:
hitCol = 1
elif format == SAM:
if useDesc:
raise Exception("The SAM format does not keep the hit description")
else:
hitCol = 2
else:
sys.exit("I'm sorry, I don't understand the format: %s" % (format))
return hitCol
def filterM8(instream, outstream, params, to_gff=False):
"""
Filter instream and write output to outstream
"""
logger.debug("blastm8.filterM8: reading from {}".format(instream.name))
line_count = 0
if to_gff and params.format != GFF:
for read, hits in filterM8Stream(instream, params, return_lines=False):
for hit in hits:
line_count += 1
outstream.write(hit.to_gff())
else:
for line in filterM8Stream(instream, params, return_lines=True):
line_count += 1
outstream.write(line)
logger.debug("blastm8.filterM8: wrote {} lines".format(line_count))
def sortLines(instream):
logger.info("Sorting input lines")
lines = []
for line in instream:
lines.append(line)
lines.sort()
logger.debug("Done sorting")
return lines.__iter__()
def getHitStream(instream, options):
"""
Simply parse lines into Hits one at a time and return them
"""
for line in instream:
if len(line) == 0:
continue
try:
hit = Hit.getHit(line, options)
except Exception:
print("ERROR parsing line")
print(line)
raise
if hit is not None:
yield hit
def generate_hits(hit_table, format=BLASTPLUS, **filter_args):
"""
    yields (read, hit_iterator) tuples
    default format is BLASTPLUS; change with the format= keyword
See class FilterParams for other arguments
"""
with InputFile(hit_table) as m8stream:
params = FilterParams(format=format, **filter_args)
for read, hits in filterM8Stream(m8stream,
params,
return_lines=False,
):
yield read, hits
logger.debug("Read %d lines from %s table", m8stream.lines, format)
def filterM8Stream(instream, options, return_lines=True):
"""
return an iterator over the lines in given input stream that pass filter
"""
logger.debug("Processing stream: %s" % instream)
current_read = None
hits = []
logger.debug(repr(options))
def build_items_to_generate(current_read, hits,
sort=options.sort,
needs_filter=doWeNeedToFilter(options),
return_lines=return_lines):
if current_read is not None:
logger.debug(
"processing %d hits for %s" %
(len(hits), current_read))
            if sort is not None:
                sortHits(hits, sort)
if needs_filter:
hits = filterHits(hits, options)
if return_lines:
for hit in hits:
yield hit.line
else:
if needs_filter:
hits = list(hits)
if len(hits) > 0:
yield (current_read, hits)
for line_hit in getHitStream(instream, options):
if line_hit.read != current_read:
for item in build_items_to_generate(current_read, hits):
yield item
hits = []
current_read = line_hit.read
hits.append(line_hit)
for item in build_items_to_generate(current_read, hits):
yield item
def sortHits(hits, sortType):
"""
take all the hits for a read as a list and sort them by score and hit name
"""
logger.debug("Sorting hits (byScore = %s)" % (sortType))
# set up sort key
if sortType == 'evalue':
sort_key = get_sort_by_evalue_key
elif sortType == 'pctid':
sort_key = get_sort_by_pctid_key
else:
sort_key = get_sort_by_score_key
# sort in place
hits.sort(key=sort_key)
def get_sort_by_evalue_key(hit):
return (hit.evalue, hit.hit)
def get_sort_by_pctid_key(hit):
return (hit.pctid * -1, hit.hit)
def get_sort_by_score_key(hit):
return (hit.score * -1, hit.hit)
def doWeNeedToFilter(options):
""" we can skip the filter step if we're going to let everything through
"""
if options.top_pct >= 0:
return True
if options.bits > 0:
return True
if options.evalue is not None:
return True
if options.pctid > 0:
return True
if options.length > 0:
return True
if options.hits_per_read > 0:
return True
if options.hsps_per_hit > 0:
return True
if options.nonoverlapping >= 0:
return True
if options.bad_refs:
return True
return False
def filterHits(hits, options):
if options.bad_refs:
logger.debug("REmoving bad_refs from hits")
hits = [h for h in hits if h.hit not in options.bad_refs]
# A top_pct cutoff, requires finding the top score first
if options.top_pct >= 0:
# get the best score in this set of hits
bestScore = 0
if options.sort == 'score':
if len(hits) > 0:
bestScore = hits[0].score
else:
for hit in hits:
if hit.score > bestScore:
bestScore = hit.score
tpScore = bestScore - (options.top_pct * bestScore / 100.0)
minScore = max((tpScore, options.bits))
logger.debug(
"Cutoff (%s) is max of bits(%s) and %d%% less than max(%s)" %
(minScore, options.bits, options.top_pct, bestScore))
else:
minScore = options.bits
# apply filters
counted_hits = set()
hit_count = 0
hsp_counts = {}
hit_regions = []
for hit in hits:
hsp_count = hsp_counts.get(hit.hit, 0)
logger.debug("hit: %s::%s - score:%s" % (hit.read, hit.hit, hit.score))
# Simple comparison tests
try:
if minScore > 0 and hit.score < minScore:
logger.debug("score too low: %r" % hit.score)
continue
except ValueError:
if hit.score is None:
raise Exception("This format (%s) does not have a score"
% hit.format)
raise
if options.format != LAST0\
and options.evalue is not None\
and hit.evalue > options.evalue:
logger.debug("evalue too high: %r" % hit.evalue)
continue
# PCTID
try:
if options.pctid > 0 and hit.pctid < options.pctid:
logger.debug("pct ID too low: %r < %r" %
(hit.pctid, options.pctid))
continue
except AttributeError:
raise Exception(
"This hit type (%s) does not have a PCTID defined."
"You cannot filter by PCTID" %
(hit.format))
if abs(hit.mlen) < options.length:
logger.debug("hit too short: %r" % hit.mlen)
continue
if options.aln is not None and hit.getAln() < options.aln:
logger.debug("aln fraction too low: %r" % hit.getAln())
continue
if options.hits_per_read > 0 \
and hit_count >= options.hits_per_read \
and hit.hit not in counted_hits:
logger.debug("Too many hits")
continue
if options.hsps_per_hit > 0 and hsp_count >= options.hsps_per_hit:
logger.debug("Too many HSPs")
continue
if options.nonoverlapping >= 0:
# check for previous overlapping hits
new_hit_regions = hit.checkForOverlapAndAdd(hit_regions,
options.nonoverlapping)
if new_hit_regions is not None:
hit_regions = new_hit_regions
else:
continue
# increment hit counts
hsp_counts[hit.hit] = hsp_count + 1
counted_hits.add(hit.hit)
hit_count = len(counted_hits)
# print hit
yield hit
def add_hit_table_arguments(parser,
defaults={},
flags=['format', 'filter_top_pct']):
"""
Set up command line arguments for parsing and filtering an m8 file.
By default, only the --format and --filter_top_pct options are added, but
any of the following can be enabled using the flags= keyword.
general hit table handling:
format (GENE, LIZ, BLASTPLUS, LAST, ...)
sort (sort hits by 'score', 'pctid', or 'evalue')
filtering options:
filter_top_pct (aka top_pct: minimum pct of best score for other
scores. 0, for best score only, 100 for all hits)
bits (minimum bit score)
evalue
pctid
length (of the alignment)
aln (fraction of query sequence aligned)
hits_per_read
hsps_per_hit
nonoverlapping
For example, flags=['format','bits','filter_top_pct'] would enable
filtering on bit score by cutoff or by percent of the top hit.
flags='all', will turn everything on.
Default values can be changed by passing a dict mapping flags to
new default values. Anything in this dict will be added to the flags list.
"""
    # merge explicit defaults into the flags list
if flags != 'all':
flags = set(flags)
for key in defaults.keys():
flags.add(key)
agroup = parser.add_argument_group(
"Hit Table Options",
"""These options control the parsing and filtering of hit
tables (from blast or lastal)""")
if flags == 'all' or 'format' in flags:
agroup.add_argument(
'-f',
'--format',
dest='hitTableFormat',
default=defaults.get(
"format",
GENE),
choices=[
GENE,
LIZ,
YANMEI,
LAST0,
BLASTPLUS,
SAM,
PAF,
GFF,
CMSEARCH,
CMSCAN,
HMMSCANDOM,
HMMSCAN,
HMMSEARCHDOM,
HMMSEARCH],
help="Format of input table: blast, last, hmmer, gene, "
"yanmei, or liz. Default is %s" % (defaults.get("format",
GENE)))
if flags == 'all' or 'filter_top_pct' in flags:
agroup.add_argument(
'-F',
'--filter_top_pct',
dest='filter_top_pct',
default=defaults.get(
"filter_top_pct",
-1),
type=int,
help="If a positive number is given, only allow hits within "
"this percent of the best hit. Use -1 for no filtering. "
"Use 0 to just take the hit(s) with the best score. "
"Default is {}"
.format(defaults.get("filter_top_pct", -1)))
if flags == 'all' or 'bits' in flags:
agroup.add_argument('-B', '--bitScore', dest='filter_bits',
type=int, default=defaults.get("bits", 0),
help="Minimum bit score to allow. Default: \
{}".format(defaults.get('bits', 0)))
if flags == 'all' or 'evalue' in flags:
agroup.add_argument('-E', '--evalue', dest='filter_evalue',
type=float, default=defaults.get("evalue", None),
help="Maximum evalue to allow. Default: \
{}".format(defaults.get('evalue', None)))
if flags == 'all' or 'pctid' in flags:
defVal = defaults.get("pctid", 0)
agroup.add_argument(
'-I',
'--pctid',
dest='filter_pctid',
type=float,
default=defVal,
help=("Minimum percent identity to allow in range 0 to 100. "
"Default: %s" % (defVal))
)
if flags == 'all' or 'length' in flags:
default = defaults.get("length", 0)
agroup.add_argument(
'-L',
'--length',
dest='filter_length',
type=int,
default=default,
help="Minimum alignment length to allow. Default: %s" %
(default))
if flags == 'all' or 'aln' in flags:
default = defaults.get("aln", None)
agroup.add_argument(
'-N',
'--aln',
dest='filter_aln',
type=float,
default=default,
help="Minimum aligned fraction to allow. Default: %s" %
(default))
if flags == 'all' or 'hits_per_read' in flags:
default = defaults.get("hits_per_read", 0)
agroup.add_argument(
'-H',
'--hits_per_read',
dest='filter_hits_per_read',
type=int,
default=default,
help="Maximum number of hits to allow per read. 0 for all hits. "
"Default: %s" % (default))
if flags == 'all' or 'hsps_per_hit' in flags:
default = defaults.get("hsps_per_hit", 0)
agroup.add_argument(
'-P',
'--hsps_per_hit',
dest='filter_hsps_per_hit',
type=int,
default=default,
help="Maximum number of HSPs to keep per hit. 0 for all HSPs. "
"Default: %s" % (default))
if flags == 'all' or 'nonoverlapping' in flags:
default = defaults.get("nonoverlapping", -1)
agroup.add_argument(
'-U',
'--nonoverlapping',
nargs="?",
type=int,
const=0,
default=default,
action='store',
dest='filter_nonoverlapping',
help="Ignore hits which overlap higher scoring hits. Optional "
"integer value may be specified to ignore small overlaps. "
"A negative value allows all overlaps. "
"Default: %s" % (default))
if flags == 'all' or 'sort' in flags:
default = defaults.get("sort", None)
agroup.add_argument(
'-s',
'--sort',
dest='hitTableSort',
default=default,
choices=[
'evalue',
'pctid',
'score'],
help="sort hits for each read by 'evalue', 'pctid' or 'score' "
"before "
"filtering. Secondarily sorted by hit id to make output "
"more deterministic")
def parseCigarString(cigar):
"""
M alignment match (can be a sequence match or mismatch)
I insertion to the reference
D deletion from the reference
N skipped region from the reference
S soft clipping (clipped sequences present in SEQ)
H hard clipping (clipped sequences NOT present in SEQ)
P padding (silent deletion from padded reference)
= sequence match
X sequence mismatch
So, 5S6M1I4M means the first 5 bases were soft masked, the next 6 match,
then an insertion in the query, then 4 matches.
"""
alen = 0
alenh = 0
alenq = 0
qstart = 1
matches = 0
mismatches = 0
pctid = 0
for cigarBit in cigarRE.findall(cigar):
bitlen = int(cigarBit[:-1])
bittype = cigarBit[-1]
if bittype == 'S' or bittype == 'H':
if alenq == 0:
qstart += bitlen
else:
# done reading matches, don't care about unmatched end
break
elif bittype == 'M':
alen += bitlen
alenh += bitlen
alenq += bitlen
elif bittype == 'I':
alenq += bitlen
elif bittype == 'D' or bittype == 'N':
alenh += bitlen
elif bittype == '=':
alen += bitlen
alenh += bitlen
alenq += bitlen
matches += bitlen
elif bittype == 'X':
alen += bitlen
alenh += bitlen
alenq += bitlen
mismatches += bitlen
else:
raise Exception(
"Unrecognized CIGAR tag (%s) in string: %s" %
(bittype, cigar))
if matches > 0:
# (if no matches found, leave pctid at "0" so it is ignored)
pctid = matches / float(matches + mismatches)
qend = qstart + alenq - 1
return (alen, alenh, alenq, qstart, qend, pctid)
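# Illustrative sketch: for the docstring's example,
#     parseCigarString("5S6M1I4M")  # -> (10, 10, 11, 6, 16, 0)
# 10 aligned M columns, 10 hit bases, 11 query bases (one insertion),
# query start 6 (after 5 soft-clipped bases), query end 16, and
# pctid 0 because plain M ops carry no match/mismatch information.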
capturing_digits_re = re.compile(r'(\d+)')
def get_alignment_percent_identity(mdx_string):
"""
Use the MD:Z string returned by BWA to get percent ID
"""
matches = 0
mismatches = 0
for chunk in capturing_digits_re.split(mdx_string):
if len(chunk) == 0:
# first and last elements are often empty strings. Ignore them
continue
if chunk.startswith('^'):
            # this is a deletion, irrelevant for pctid
continue
try:
            # is it an integer? it's the number of matches
matches += int(chunk)
except ValueError:
# otherwise, it's the string of the reference that was mismatched
mismatches += len(chunk)
return 100 * float(matches) / float(matches + mismatches)
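# Illustrative sketch: for the MD:Z string "10A5^AC6" this returns
# 100 * 21 / 22, about 95.45 -- 10 + 5 + 6 matched bases, one
# mismatch ('A'), and the deletion ('^AC') does not count.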
def setup_tests():
import sys
global myAssertEq, myAssertIs
from edl.test import myAssertEq, myAssertIs
if len(sys.argv) > 1:
loglevel = logging.DEBUG
else:
loglevel = logging.WARN
logging.basicConfig(stream=sys.stderr, level=loglevel)
def test():
setup_tests()
m8data = ["001598_1419_3101 H186x25M length=284 uaccno=E3N7QM101DQXE3"
" gi|91763278|ref|ZP_01265242.1| Peptidase family"
" M48 [Candidatus Pelagibacter ubique HTCC1002]"
" 61.7021276595745 94 282 1 57 150 134 4e-30"
" 0.992957746478873\n",
"001598_1419_3101 H186x25M length=284 uaccno=E3N7QM101DQXE3"
" gi|71083682|ref|YP_266402.1|"
" M48 family peptidase [Candidatus Pelagibacter ubique"
" HTCC1062] 61.7021276595745 94 282 1 40 133 134"
" 4e-30 0.992957746478873\n",
"001598_1419_3101 H186x25M length=284 uaccno=E3N7QM101DQXE3"
" gi|262277211|ref|ZP_06055004.1| peptidase family M48 family"
" [alpha proteobacterium HIMB114] 65.9090909090909 88 264"
" 1 63 150 132 9e-30 0.929577464788732\n",
"001598_1419_3101 H186x25M length=284 uaccno=E3N7QM101DQXE3"
" gi|254456035|ref|ZP_05069464.1| peptidase family M48"
" [Candidatus Pelagibacter sp. HTCC7211] 66.2790697674419"
" 86 258 1 65 150 132 2e-29 0.908450704225352\n",
"001598_1419_3101 H186x25M length=284 uaccno=E3N7QM101DQXE3"
" gi|118581678|ref|YP_902928.1| peptidase M48, Ste24p"
" [Pelobacter propionicus DSM 2379] 51.6129032258064 93"
" 282 4 53 144 108 2e-22 0.982394366197183\n",
"001598_1419_3101 H186x25M length=284 uaccno=E3N7QM101DQXE3"
" gi|255534285|ref|YP_003094656.1| zn-dependent protease"
" with chaperone function [Flavobacteriaceae bacterium"
" 3519-10] 47.3118279569892 93 282 4 55 146 102"
" 1e-20 0.982394366197183\n",
"001598_1419_3101 H186x25M length=284 uaccno=E3N7QM101DQXE3"
" gi|317502588|ref|ZP_07960709.1| M48B family peptidase"
" [Prevotella salivae DSM 15606] 51.6129032258064 93"
" 279 1 85 176 100 7e-20 0.982394366197183\n",
"001598_1419_3101 H186x25M length=284 uaccno=E3N7QM101DQXE3"
" gi|325104752|ref|YP_004274406.1| peptidase M48 Ste24p"
" [Pedobacter saltans DSM 12145] 48.3870967741936 93"
" 279 1 59 150 100 7e-20 0.982394366197183\n",
"001598_1419_3101 H186x25M length=284 uaccno=E3N7QM101DQXE3"
" gi|256425464|ref|YP_003126117.1| peptidase M48 Ste24p"
" [Chitinophaga pinensis DSM 2588] 48.8888888888889"
" 90 273 4 58 146 99.8 9e-20 0.950704225352113\n",
"001598_1419_3101 H186x25M length=284 uaccno=E3N7QM101DQXE3"
" gi|299142895|ref|ZP_07036022.1| peptidase, M48 family"
" [Prevotella oris C735] 50 94 282 1 58 150 99.4"
" 1e-19 0.992957746478873\n"]
# test1 passthrough
logging.info("Starting passthrough test")
m8stream = m8data.__iter__()
params = FilterParams()
outs = filterM8Stream(m8stream, params, return_lines=True)
myAssertEq(next(outs), m8data[0])
myAssertEq(next(outs), m8data[1])
myAssertEq(next(outs), m8data[2])
myAssertEq(next(outs), m8data[3])
myAssertEq(next(outs), m8data[4])
logging.info("Starting best test")
m8stream = m8data.__iter__()
params = FilterParams(top_pct=0.)
outs = filterM8Stream(m8stream, params)
myAssertEq(next(outs), m8data[0])
myAssertEq(next(outs), m8data[1])
try:
next(outs)
sys.exit("There should only be 2 elements!")
except StopIteration:
pass
logging.info("Starting n1 test")
m8stream = m8data.__iter__()
params = FilterParams(hits_per_read=1)
outs = filterM8Stream(m8stream, params)
myAssertEq(next(outs), m8data[0])
try:
next(outs)
sys.exit("There should only be 1 elements!")
except StopIteration:
pass
def test_gff():
setup_tests()
line = 'KM282-20-02b-5_c283151\tcsearch\ttRNA\t303\t233\t' + \
'40.6\t-\t.\tTarget=RF00005 2 70\n'
hit = Hit(line, 'gff')
myAssertEq(hit.read, 'KM282-20-02b-5_c283151')
myAssertEq(hit.score, 40.6)
myAssertEq(hit.hit, 'RF00005')
line = 'KM282-20-02a-100_c12273\tbarrnap:0.7\trRNA\t9\t772\t' + \
'6.8e-41\t+\t.\tName=12S_rRNA;product=12S ribosomal RNA\n'
hit = Hit(line, 'gff')
myAssertEq(hit.read, 'KM282-20-02a-100_c12273')
myAssertEq(hit.evalue, 6.8e-41)
myAssertEq(hit.hit, '12S_rRNA')
line = 'KM282-20-02a-100_c1\tProdigal_v2.6.2\tCDS\t309\t686\t' + \
'53.3\t-\t0\tID=1_2;partial=00;start_type=ATG;rbs_motif=' + \
'AGGA;rbs_spacer=5-10bp;gc_cont=0.381;conf=100.00;score=' + \
'54.54;cscore=45.68;sscore=8.86;rscore=5.50;uscore=-0.63;' + \
'tscore=2.76;\n'
hit = Hit(line, 'gff')
myAssertEq(hit.read, 'KM282-20-02a-100_c1')
myAssertEq(hit.score, 53.3)
myAssertEq(hit.hit, '1_2')
if __name__ == '__main__':
test()
| jmeppley/py-metagenomics | edl/blastm8.py | Python | mit | 46,178 | ["BLAST", "BWA"] | 93c494cd81531103219903c50ead474f33a16d891be2ba028dd504ef52b1c668 |
# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import textwrap
import time
import yaml
from jinja2 import BaseLoader, Environment, FileSystemLoader
import ansible.constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import build_collection, install_collections, parse_collections_requirements_file, \
publish_collection
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.collection_loader import is_collection_ref
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
def __init__(self, args):
# Inject role into sys.argv[1] as a backwards compatibility step
if len(args) > 1 and args[1] not in ['-h', '--help'] and 'role' not in args and 'collection' not in args:
# TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
args.insert(1, 'role')
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def init_parser(self):
        ''' create an options parser for bin/ansible-galaxy '''
super(GalaxyCLI, self).init_parser(
desc="Perform various Role and Collection related operations.",
)
# common
common = opt_help.argparse.ArgumentParser(add_help=False)
common.add_argument('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
help='Ignore SSL certificate validation errors.')
opt_help.add_verbosity_options(common)
# options that apply to more than one action
user_repo = opt_help.argparse.ArgumentParser(add_help=False)
user_repo.add_argument('github_user', help='GitHub username')
user_repo.add_argument('github_repo', help='GitHub repository')
offline = opt_help.argparse.ArgumentParser(add_help=False)
offline.add_argument('--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
roles_path = opt_help.argparse.ArgumentParser(add_help=False)
roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
                                help='The path to the directory containing your roles. The default is the first writable one '
                                     'configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
force = opt_help.argparse.ArgumentParser(add_help=False)
force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role or collection')
# Add sub parser for the Galaxy role type (role or collection)
type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
type_parser.required = True
# Define the actions for the collection object type
collection = type_parser.add_parser('collection',
parents=[common],
help='Manage an Ansible Galaxy collection.')
collection_parser = collection.add_subparsers(metavar='ACTION', dest='collection')
collection_parser.required = True
build_parser = collection_parser.add_parser(
'build', help='Build an Ansible collection artifact that can be published to Ansible Galaxy.',
parents=[common, force])
build_parser.set_defaults(func=self.execute_build)
build_parser.add_argument(
'args', metavar='collection', nargs='*', default=('./',),
help='Path to the collection(s) directory to build. This should be the directory that contains the '
'galaxy.yml file. The default is the current working directory.')
build_parser.add_argument(
'--output-path', dest='output_path', default='./',
            help='The path in which the collection is built. The default is the current working directory.')
self.add_init_parser(collection_parser, [common, force])
cinstall_parser = collection_parser.add_parser('install', help='Install collection from Ansible Galaxy',
parents=[force, common])
cinstall_parser.set_defaults(func=self.execute_install)
cinstall_parser.add_argument('args', metavar='collection_name', nargs='*',
help='The collection(s) name or path/url to a tar.gz collection artifact. This '
'is mutually exclusive with --requirements-file.')
cinstall_parser.add_argument('-p', '--collections-path', dest='collections_path', required=True,
help='The path to the directory containing your collections.')
cinstall_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors during installation and continue with the next specified '
'collection. This will not ignore dependency conflict errors.')
cinstall_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be installed.')
cinstall_exclusive = cinstall_parser.add_mutually_exclusive_group()
cinstall_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download collections listed as dependencies")
cinstall_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
help="Force overwriting an existing collection and its dependencies")
publish_parser = collection_parser.add_parser(
'publish', help='Publish a collection artifact to Ansible Galaxy.',
parents=[common])
publish_parser.set_defaults(func=self.execute_publish)
publish_parser.add_argument(
'args', metavar='collection_path', help='The path to the collection tarball to publish.')
publish_parser.add_argument(
'--api-key', dest='api_key',
help='The Ansible Galaxy API key which can be found at https://galaxy.ansible.com/me/preferences. '
'You can also use ansible-galaxy login to retrieve this key.')
publish_parser.add_argument(
'--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import validation results.")
# Define the actions for the role object type
role = type_parser.add_parser('role',
parents=[common],
help='Manage an Ansible Galaxy role.')
role_parser = role.add_subparsers(metavar='ACTION', dest='role')
role_parser.required = True
delete_parser = role_parser.add_parser('delete', parents=[user_repo, common],
help='Removes the role from Galaxy. It does not remove or alter the actual GitHub repository.')
delete_parser.set_defaults(func=self.execute_delete)
import_parser = role_parser.add_parser('import', help='Import a role', parents=[user_repo, common])
import_parser.set_defaults(func=self.execute_import)
import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True, help="Don't wait for import results.")
import_parser.add_argument('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
import_parser.add_argument('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_user/github_repo.')
info_parser = role_parser.add_parser('info', help='View more details about a specific role.',
parents=[offline, common, roles_path])
info_parser.set_defaults(func=self.execute_info)
info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
rinit_parser = self.add_init_parser(role_parser, [offline, force, common])
rinit_parser.add_argument('--type',
dest='role_type',
action='store',
default='default',
help="Initialize using an alternate role type. Valid types include: 'container', 'apb' and 'network'.")
install_parser = role_parser.add_parser('install', help='Install Roles from file(s), URL(s) or tar file(s)',
parents=[force, common, roles_path])
install_parser.set_defaults(func=self.execute_install)
install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
install_parser.add_argument('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
default=False, help='Use tar instead of the scm archive option when packaging the role')
install_parser.add_argument('args', help='Role name, URL or tar file', metavar='role', nargs='*')
install_exclusive = install_parser.add_mutually_exclusive_group()
install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download roles listed as dependencies")
        install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
                                       help="Force overwriting an existing role and its dependencies")
remove_parser = role_parser.add_parser('remove', help='Delete roles from roles_path.', parents=[common, roles_path])
remove_parser.set_defaults(func=self.execute_remove)
remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
list_parser = role_parser.add_parser('list', help='Show the name and version of each role installed in the roles_path.',
parents=[common, roles_path])
list_parser.set_defaults(func=self.execute_list)
list_parser.add_argument('role', help='Role', nargs='?', metavar='role')
        login_parser = role_parser.add_parser('login', parents=[common],
                                              help="Login to the api.github.com server in order to use ansible-galaxy role "
                                                   "subcommands such as 'import', 'delete', 'publish', and 'setup'")
login_parser.set_defaults(func=self.execute_login)
login_parser.add_argument('--github-token', dest='token', default=None,
help='Identify with github token rather than username and password.')
search_parser = role_parser.add_parser('search', help='Search the Galaxy database by tags, platforms, author and multiple keywords.',
parents=[common])
search_parser.set_defaults(func=self.execute_search)
search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
search_parser.add_argument('--author', dest='author', help='GitHub username')
search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
setup_parser = role_parser.add_parser('setup', help='Manage the integration between Galaxy and the given source.',
parents=[roles_path, common])
setup_parser.set_defaults(func=self.execute_setup)
setup_parser.add_argument('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see ID values.')
setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
setup_parser.add_argument('source', help='Source')
setup_parser.add_argument('github_user', help='GitHub username')
setup_parser.add_argument('github_repo', help='GitHub repository')
setup_parser.add_argument('secret', help='Secret')
def add_init_parser(self, parser, parents):
galaxy_type = parser.dest
obj_name_kwargs = {}
if galaxy_type == 'collection':
obj_name_kwargs['type'] = GalaxyCLI._validate_collection_name
init_parser = parser.add_parser('init',
help='Initialize new {0} with the base structure of a {0}.'.format(galaxy_type),
parents=parents)
init_parser.set_defaults(func=self.execute_init)
init_parser.add_argument('--init-path',
dest='init_path',
default='./',
help='The path in which the skeleton {0} will be created. The default is the current working directory.'.format(galaxy_type))
init_parser.add_argument('--{0}-skeleton'.format(galaxy_type),
dest='{0}_skeleton'.format(galaxy_type),
default=C.GALAXY_ROLE_SKELETON,
help='The path to a {0} skeleton that the new {0} should be based upon.'.format(galaxy_type))
init_parser.add_argument('{0}_name'.format(galaxy_type),
help='{0} name'.format(galaxy_type.capitalize()),
**obj_name_kwargs)
return init_parser
def post_process_args(self, options):
options = super(GalaxyCLI, self).post_process_args(options)
display.verbosity = options.verbosity
return options
def run(self):
super(GalaxyCLI, self).run()
self.galaxy = Galaxy()
self.api = GalaxyAPI(self.galaxy)
context.CLIARGS['func']()
@staticmethod
def exit_without_ignore(rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not context.CLIARGS['ignore_errors']:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
@staticmethod
def _resolve_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
@staticmethod
def _validate_collection_name(name):
if is_collection_ref('ansible_collections.{0}'.format(name)):
return name
raise AnsibleError("Invalid collection name, must be in the format <namespace>.<collection>")
@staticmethod
def _get_skeleton_galaxy_yml(template_path, inject_data):
with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
galaxy_meta = get_collections_galaxy_meta_info()
required_config = []
optional_config = []
for meta_entry in galaxy_meta:
config_list = required_config if meta_entry.get('required', False) else optional_config
value = inject_data.get(meta_entry['key'], None)
if not value:
meta_type = meta_entry.get('type', 'str')
if meta_type == 'str':
value = ''
elif meta_type == 'list':
value = []
elif meta_type == 'dict':
value = {}
meta_entry['value'] = value
config_list.append(meta_entry)
link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
const_pattern = re.compile(r"C\(([^)]+)\)")
def comment_ify(v):
if isinstance(v, list):
v = ". ".join([l.rstrip('.') for l in v])
v = link_pattern.sub(r"\1 <\2>", v)
v = const_pattern.sub(r"'\1'", v)
return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
def to_yaml(v):
return yaml.safe_dump(v, default_flow_style=False).rstrip()
env = Environment(loader=BaseLoader)
env.filters['comment_ify'] = comment_ify
env.filters['to_yaml'] = to_yaml
template = env.from_string(meta_template)
meta_value = template.render({'required_config': required_config, 'optional_config': optional_config})
return meta_value
############################
# execute actions
############################
def execute_role(self):
"""
Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
as listed below.
"""
# To satisfy doc build
pass
def execute_collection(self):
"""
Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
listed below.
"""
# To satisfy doc build
pass
def execute_build(self):
"""
Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
By default, this command builds from the current working directory. You can optionally pass in the
collection input path (where the ``galaxy.yml`` file is).
"""
force = context.CLIARGS['force']
output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
elif os.path.isfile(b_output_path):
raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
for collection_path in context.CLIARGS['args']:
collection_path = GalaxyCLI._resolve_path(collection_path)
build_collection(collection_path, output_path, force)
def execute_init(self):
"""
Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
"""
galaxy_type = context.CLIARGS['type']
init_path = context.CLIARGS['init_path']
force = context.CLIARGS['force']
obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
inject_data = dict(
description='your {0} description'.format(galaxy_type),
ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
)
if galaxy_type == 'role':
inject_data.update(dict(
author='your name',
company='your company (optional)',
license='license (GPL-2.0-or-later, MIT, etc)',
role_name=obj_name,
role_type=context.CLIARGS['role_type'],
issue_tracker_url='http://example.com/issue/tracker',
repository_url='http://example.com/repository',
documentation_url='http://docs.example.com',
homepage_url='http://example.com',
min_ansible_version=ansible_version[:3], # x.y
))
obj_path = os.path.join(init_path, obj_name)
elif galaxy_type == 'collection':
namespace, collection_name = obj_name.split('.', 1)
inject_data.update(dict(
namespace=namespace,
collection_name=collection_name,
version='1.0.0',
readme='README.md',
authors=['your name <example@domain.com>'],
license=['GPL-2.0-or-later'],
repository='http://example.com/repository',
documentation='http://docs.example.com',
homepage='http://example.com',
issues='http://example.com/issue/tracker',
))
obj_path = os.path.join(init_path, namespace, collection_name)
b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
if os.path.exists(b_obj_path):
if os.path.isfile(obj_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
elif not force:
raise AnsibleError("- the directory %s already exists. "
"You can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % to_native(obj_path))
if obj_skeleton is not None:
own_skeleton = False
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
else:
own_skeleton = True
obj_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
obj_skeleton = os.path.expanduser(obj_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
if not os.path.exists(obj_skeleton):
raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
to_native(obj_skeleton), galaxy_type)
)
template_env = Environment(loader=FileSystemLoader(obj_skeleton))
# create role directory
if not os.path.exists(b_obj_path):
os.makedirs(b_obj_path)
for root, dirs, files in os.walk(obj_skeleton, topdown=True):
rel_root = os.path.relpath(root, obj_skeleton)
rel_dirs = rel_root.split(os.sep)
rel_root_dir = rel_dirs[0]
if galaxy_type == 'collection':
# A collection can contain templates in playbooks/*/templates and roles/*/templates
in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
else:
in_templates_dir = rel_root_dir == 'templates'
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
elif galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
# Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
# dynamically which requires special options to be set.
# The templated data's keys must match the key name but the inject data contains collection_name
# instead of name. We just make a copy and change the key back to name for this file.
template_data = inject_data.copy()
template_data['name'] = template_data.pop('collection_name')
meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
with open(b_dest_file, 'wb') as galaxy_obj:
galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(rel_root, f)
dest_file = os.path.join(obj_path, rel_root, filename)
template_env.get_template(src_template).stream(inject_data).dump(dest_file, encoding='utf-8')
else:
f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
for d in dirs:
b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
if not os.path.exists(b_dir_path):
os.makedirs(b_dir_path)
display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
roles_path = context.CLIARGS['roles_path']
data = ''
for role in context.CLIARGS['args']:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not context.CLIARGS['offline']:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
# FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
Install one or more roles(``ansible-galaxy role install``), or one or more collections(``ansible-galaxy collection install``).
You can pass in a list (roles or collections) or use the file
option listed below (these are mutually exclusive). If you pass in a list, it
can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.
"""
if context.CLIARGS['type'] == 'collection':
collections = context.CLIARGS['args']
force = context.CLIARGS['force']
output_path = context.CLIARGS['collections_path']
            # TODO: use a list of servers that have been configured in ~/.ansible_galaxy
servers = [context.CLIARGS['api_server']]
ignore_certs = context.CLIARGS['ignore_certs']
ignore_errors = context.CLIARGS['ignore_errors']
requirements_file = context.CLIARGS['requirements']
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
if collections and requirements_file:
raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
elif not collections and not requirements_file:
raise AnsibleError("You must specify a collection name or a requirements file.")
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
collection_requirements = parse_collections_requirements_file(requirements_file)
else:
collection_requirements = []
for collection_input in collections:
name, dummy, requirement = collection_input.partition(':')
collection_requirements.append((name, requirement or '*', None))
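                    # e.g. 'my_ns.my_coll:1.0.0' -> ('my_ns.my_coll', '1.0.0', None);
                    # a bare 'my_ns.my_coll' falls back to the '*' wildcard requirement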
output_path = GalaxyCLI._resolve_path(output_path)
collections_path = C.COLLECTIONS_PATHS
if len([p for p in collections_path if p.startswith(output_path)]) == 0:
display.warning("The specified collections path '%s' is not part of the configured Ansible "
"collections paths '%s'. The installed collection won't be picked up in an Ansible "
"run." % (to_text(output_path), to_text(":".join(collections_path))))
if os.path.split(output_path)[1] != 'ansible_collections':
output_path = os.path.join(output_path, 'ansible_collections')
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
install_collections(collection_requirements, output_path, servers, (not ignore_certs), ignore_errors,
no_deps, force, force_deps)
return 0
role_file = context.CLIARGS['role_file']
if not context.CLIARGS['args'] and role_file is None:
# the user needs to specify one of either --role-file or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
force = context.CLIARGS['force'] or force_deps
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError(
"Unable to load data from the requirements file (%s): %s" % (role_file, to_native(e))
)
if required_roles is None:
raise AnsibleError("No roles found in file: %s" % role_file)
for role in required_roles:
if "include" not in role:
role = RoleRequirement.role_yaml_parse(role)
display.vvv("found role %s in yaml file" % str(role))
if "name" not in role and "scm" not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
with open(role["include"]) as f_include:
try:
roles_left += [
GalaxyRole(self.galaxy, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))
]
except Exception as e:
msg = "Unable to load data from the include requirements file: %s %s"
raise AnsibleError(msg % (role_file, e))
else:
raise AnsibleError("Invalid role requirements file")
f.close()
except (IOError, OSError) as e:
raise AnsibleError('Unable to open %s: %s' % (role_file, to_native(e)))
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in context.CLIARGS['args']:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
            # if role names were given on the command line, only
            # process roles from the role file whose names match
if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
display.vvv('Skipping role %s' % role.name)
continue
display.vvv('Processing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version or force:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
if not role.metadata:
display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
else:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % to_text(dep_role))
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
if force_deps:
                                    display.display('- changing dependent role %s from %s to %s' %
(dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
dep_role.remove()
roles_left.append(dep_role)
else:
display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
(to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
else:
if force_deps:
roles_left.append(dep_role)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
lists the roles installed on the local system or matches a single role passed as an argument.
"""
def _display_role(gr):
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (gr.name, version))
if context.CLIARGS['role']:
# show the requested role, if it exists
name = context.CLIARGS['role']
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = context.CLIARGS['roles_path']
path_found = False
warnings = []
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
elif not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
path_found = True
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file, path=path)
if gr.metadata:
_display_role(gr)
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths was usable. Please specify a valid path with --roles-path")
return 0
def execute_publish(self):
"""
Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
"""
api_key = context.CLIARGS['api_key'] or GalaxyToken().get()
api_server = context.CLIARGS['api_server']
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
ignore_certs = context.CLIARGS['ignore_certs']
wait = context.CLIARGS['wait']
publish_collection(collection_path, api_server, api_key, ignore_certs, wait)
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if context.CLIARGS['args']:
search = '+'.join(context.CLIARGS['args'])
if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
        verify the user's identity via GitHub and retrieve an auth token from Ansible Galaxy.
"""
# Authenticate with github and retrieve a token
if context.CLIARGS['token'] is None:
if C.GALAXY_TOKEN:
github_token = C.GALAXY_TOKEN
else:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = context.CLIARGS['token']
galaxy_response = self.api.authenticate(github_token)
if context.CLIARGS['token'] is None and C.GALAXY_TOKEN is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')
if context.CLIARGS['check_status']:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo,
reference=context.CLIARGS['reference'],
role_name=context.CLIARGS['role_name'])
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not context.CLIARGS['wait']:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
source = context.CLIARGS['source']
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
secret = context.CLIARGS['secret']
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
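# Illustrative usage sketch (hypothetical; mirrors how an ansible-galaxy
# entry-point script would typically drive this class):
#     import sys
#     cli = GalaxyCLI(sys.argv)
#     cli.run()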
| cchurch/ansible | lib/ansible/cli/galaxy.py | Python | gpl-3.0 | 50,152 | ["Galaxy"] | 3be1a9f4e54aa88688105234ae2a588cfdb3e943c9f8f4d5b6a0318aab9bc72e |
#!/usr/bin/env python
# Ryan G. Coleman, Kim A. Sharp, http://crystal.med.upenn.edu,
# ryan.g.coleman ATSYMBOL gmail.com ryangc ATSYMBOL mail.med.upenn.edu
#file takes care of reading .crg files and putting them into dictionary struct
#charge files are column based and look like this:
#NH1 ARG -0.350
#0123456789012345678901234
# 1 2
import string
import sys
import os
class charge(object):
'''reads in a .crg file, makes a usable data structure'''
hydrophobicThreshold = 0.45
def __init__(
self, chargeFileName="$TDHOME/src/charge_parameters/parse.crg",
altChg="/disks/node18/coleman/src/charge_parameters/parse.crg"):
'''reads in the file, sets up the structure, etc'''
chargeFileName = os.path.expandvars(chargeFileName)
altChg = os.path.expandvars(altChg)
self.residues = {}
self.atoms = {}
try:
chargeFile = open(chargeFileName, 'r')
except IOError:
chargeFile = open(altChg, 'r')
try:
for line in chargeFile:
if line[0] == '!' or line[:22] == 'atom__resnumbc_charge_' or \
line[:22] == 'aaaaaarrrnnnncqqqqqqqq':
pass # means a comment
else:
try:
atom = string.strip(line[0:4]).upper()
res = string.strip(line[5:14]).upper()
ch = float(line[15:22])
if len(res) == 0: # no residue, default for atoms if not found
self.atoms[atom] = ch
else:
if res not in self.residues:
self.residues[res] = {}
self.residues[res][atom] = ch
          except (TypeError, ValueError):  # float() raises ValueError on bad input
print "warning: error reading line: " + line
except StopIteration:
pass # EOF
def getCharge(self, atomName, resName):
'''given a residue and atom, find the charge'''
atomUp = string.strip(atomName.upper())
resUp = string.strip(resName.upper())
#don't want to mess with case sensitivity
if resUp in self.residues and atomUp in self.residues[resUp]: # normal
return self.residues[resUp][atomUp]
elif atomUp in self.atoms: # trying to use default
return self.atoms[atomUp]
elif atomUp[0] in self.atoms: # trying to use just atom type
return self.atoms[atomUp[0]]
else: # can't do it
return None
def getTrinaryCharge(self, atomName, resName):
'''returns -1, 0 or +1 depending on charge and hydrophobic threshold'''
tempCharge = self.getCharge(atomName, resName)
if tempCharge is None:
return None
elif abs(tempCharge) < charge.hydrophobicThreshold:
return 0
elif tempCharge > 0:
return +1
else: # must be < 0
return -1
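# Illustrative getTrinaryCharge mapping with the 0.45 threshold above
# (charges hypothetical): -0.51 -> -1, 0.30 -> 0, +0.52 -> +1.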
#main, only runs if testing reading of files
if -1 != string.find(sys.argv[0], "charge.py"):
if len(sys.argv) >= 2:
chargeD = charge(sys.argv[1])
print chargeD.residues, chargeD.atoms
print chargeD.getCharge('CE1', 'HSE')
|
ryancoleman/traveldistance
|
src/charge.py
|
Python
|
gpl-2.0
| 2,913
|
[
"CRYSTAL"
] |
0f640d1abad436cfa4f076498c106e22c8b592561cfba7e510ef95d6638dc6f0
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import inspect
import types
import warnings
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape", "get_batch_shape", "event_shape", "get_event_shape",
"sample_n", "log_prob", "prob", "log_cdf", "cdf", "log_survival_function",
"survival_function", "entropy", "mean", "variance", "std", "mode"]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create
# a non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
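# Behavior sketch for _update_docstring (whitespace approximate): given
#   old_str    = "Summary.\n\nArgs:\n  x: input."
#   append_str = "Extra note."
# the result places "  Extra note." (indented two spaces) immediately before
# the last "Args:" line, separated by blank lines; with no "Args:" section,
# append_str is simply appended at the end.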
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
  (e.g., mean, variance) of random variables (e.g., Bernoulli, Gaussian).
### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@distribution_util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
  docstring. This is implemented as a simple decorator to avoid the Python
  linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
  The shapes of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
  `log_prob` reflect this broadcasting, as do the return values of `sample`
  and `sample_n`.
`sample_n_shape = (n,) + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.get_event_shape()
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape
# Sampling returns a sample per distribution. `samples` has shape
# (5, 2, 2), which is (n,) + batch_shape + event_shape, where n=5,
# batch_shape=(2, 2), and event_shape=().
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape (2, 2) as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is (2, 2), one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
  positive real numbers `a` and `b`, and does not have a well-defined mode if
  `a < 1` or `b < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
is_continuous,
is_reparameterized,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
is_continuous: Python boolean. If `True` this
`Distribution` is continuous over its supported domain.
is_reparameterized: Python boolean. If `True` this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution.
validate_args: Python boolean. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: Python boolean. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
parameters: Python dictionary of parameters used to instantiate this
`Distribution`.
graph_parents: Python list of graph prerequisites of this `Distribution`.
name: A name for this distribution. Default: subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not contrib_framework.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
parameters = parameters or {}
self._dtype = dtype
self._is_continuous = is_continuous
self._is_reparameterized = is_reparameterized
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters
self._graph_parents = graph_parents
self._name = name or type(self).__name__
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
Subclasses should override static method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. TensorShape) shapes.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
return self._parameters
@property
def is_continuous(self):
return self._is_continuous
@property
def is_reparameterized(self):
return self._is_reparameterized
@property
def allow_nan_stats(self):
"""Python boolean describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance
of a Cauchy distribution is infinity. However, sometimes the
statistic is undefined, e.g., if a distribution's pdf does not achieve a
maximum within the support of the distribution, the mode is undefined.
If the mean is undefined, then by definition the variance is undefined.
E.g. the mean for Student's T for df = 1 is undefined (no clear way to say
it is either + or - infinity), so the variance = E[(X - mean)^2] is also
undefined.
Returns:
allow_nan_stats: Python boolean.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python boolean indicated possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
    Note: the copied distribution may continue to depend on the original
    initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
      distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
# Python3 leaks "__class__" into `locals()` so we remove if present.
# TODO(b/32376812): Remove this pop.
parameters.pop("__class__", None)
return type(self)(**parameters)
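  # Usage sketch (a hypothetical `Normal(mu, sigma)` subclass is assumed):
  #   n = Normal(mu=0., sigma=1.)
  #   n2 = n.copy(mu=2.)  # same sigma; `mu` overridden in the merged dict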
def _batch_shape(self):
raise NotImplementedError("batch_shape is not implemented")
def batch_shape(self, name="batch_shape"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The product of the dimensions of the `batch_shape` is the number of
independent distributions of this kind the instance represents.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.get_batch_shape().is_fully_defined():
return ops.convert_to_tensor(self.get_batch_shape().as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape()
def _get_batch_shape(self):
return tensor_shape.TensorShape(None)
def get_batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
Same meaning as `batch_shape`. May be only partially defined.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return self._get_batch_shape()
def _event_shape(self):
raise NotImplementedError("event_shape is not implemented")
def event_shape(self, name="event_shape"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.get_event_shape().is_fully_defined():
return ops.convert_to_tensor(self.get_event_shape().as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape()
def _get_event_shape(self):
return tensor_shape.TensorShape(None)
def get_event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `event_shape`. May be only partially defined.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return self._get_event_shape()
@property
def is_scalar_event(self):
"""Indicates that `event_shape==[]`."""
return ops.convert_to_tensor(
self._is_scalar_helper(self.get_event_shape, self.event_shape),
name="is_scalar_event")
@property
def is_scalar_batch(self):
"""Indicates that `batch_shape==[]`."""
return ops.convert_to_tensor(
self._is_scalar_helper(self.get_batch_shape, self.batch_shape),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented")
def sample(self, sample_shape=(), seed=None, name="sample",
**condition_kwargs):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
if sample_shape.get_shape().ndims == 0:
return self.sample_n(sample_shape, seed, **condition_kwargs)
sample_shape, total = self._expand_sample_shape(sample_shape)
samples = self.sample_n(total, seed, **condition_kwargs)
output_shape = array_ops.concat_v2(
[sample_shape, array_ops.slice(array_ops.shape(samples), [1], [-1])],
0)
output = array_ops.reshape(samples, output_shape)
output.set_shape(tensor_util.constant_value_as_shape(
sample_shape).concatenate(samples.get_shape()[1:]))
return output
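  # Shape sketch (hypothetical distribution `d` with batch_shape=(2,) and
  # scalar events):
  #   d.sample()        # shape (2,)    -- empty sample_shape adds no dims
  #   d.sample(5)       # shape (5, 2)  -- a scalar shape routes to sample_n
  #   d.sample([3, 4])  # shape (3, 4, 2)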
def sample_n(self, n, seed=None, name="sample_n", **condition_kwargs):
"""Generate `n` samples.
Args:
n: `Scalar` `Tensor` of type `int32` or `int64`, the number of
observations to sample.
seed: Python integer seed for RNG
name: name to give to the op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
samples: a `Tensor` with a prepended dimension (n,).
Raises:
TypeError: if `n` is not an integer type.
"""
warnings.warn("Please use `sample` instead of `sample_n`. `sample_n` "
"will be deprecated in December 2016.",
PendingDeprecationWarning)
with self._name_scope(name, values=[n]):
n = ops.convert_to_tensor(n, name="n")
if not n.dtype.is_integer:
raise TypeError("n.dtype=%s is not an integer type" % n.dtype)
x = self._sample_n(n, seed, **condition_kwargs)
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(n))
batch_ndims = self.get_batch_shape().ndims
event_ndims = self.get_event_shape().ndims
if batch_ndims is not None and event_ndims is not None:
inferred_shape = sample_shape.concatenate(
self.get_batch_shape().concatenate(
self.get_event_shape()))
x.set_shape(inferred_shape)
elif x.get_shape().ndims is not None and x.get_shape().ndims > 0:
x.get_shape()[0].merge_with(sample_shape[0])
if batch_ndims is not None and batch_ndims > 0:
x.get_shape()[1:1+batch_ndims].merge_with(self.get_batch_shape())
if event_ndims is not None and event_ndims > 0:
x.get_shape()[-event_ndims:].merge_with(self.get_event_shape())
return x
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented")
def log_prob(self, value, name="log_prob", **condition_kwargs):
"""Log probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_prob(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
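  # Note: `log_prob`/`prob` (and the cdf/survival pairs below) fall back on
  # each other: if a subclass implements only one of `_log_prob`/`_prob`, the
  # public wrapper derives the other via math_ops.log()/math_ops.exp().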
def prob(self, value, name="prob", **condition_kwargs):
"""Probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented")
def log_cdf(self, value, name="log_cdf", **condition_kwargs):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_cdf(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented")
def cdf(self, value, name="cdf", **condition_kwargs):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _log_survival_function(self, value):
raise NotImplementedError("log_survival_function is not implemented")
def log_survival_function(self, value, name="log_survival_function",
**condition_kwargs):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_survival_function(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(1. - self.cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented")
def survival_function(self, value, name="survival_function",
**condition_kwargs):
"""Survival function.
Given random variable `X`, the survival function is defined:
```
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
      `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._survival_function(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **condition_kwargs)
except NotImplementedError:
raise original_exception
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _variance(self):
raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
"""Variance."""
with self._name_scope(name):
return self._variance()
def _std(self):
raise NotImplementedError("std is not implemented")
def std(self, name="std"):
"""Standard deviation."""
with self._name_scope(name):
return self._std()
def _mode(self):
raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
def log_pdf(self, value, name="log_pdf", **condition_kwargs):
"""Log probability density function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if not `is_continuous`.
"""
warnings.warn("Please use `log_prob` instead of `log_pdf`. `log_pdf` "
"will be deprecated in December 2016.",
PendingDeprecationWarning)
if not self.is_continuous:
raise TypeError("log_pdf is undefined for non-continuous distributions.")
return self.log_prob(value, name=name, **condition_kwargs)
def pdf(self, value, name="pdf", **condition_kwargs):
"""Probability density function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if not `is_continuous`.
"""
warnings.warn("Please use `prob` instead of `pdf`. `pdf` will be "
"deprecated in December 2016.",
PendingDeprecationWarning)
if not self.is_continuous:
raise TypeError("pdf is undefined for non-continuous distributions.")
return self.prob(value, name, **condition_kwargs)
def log_pmf(self, value, name="log_pmf", **condition_kwargs):
"""Log probability mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if `is_continuous`.
"""
warnings.warn("Please use `log_prob` instead of `log_pmf`. `log_pmf` will "
"be deprecated in December 2016.",
PendingDeprecationWarning)
if self.is_continuous:
raise TypeError("log_pmf is undefined for continuous distributions.")
return self.log_prob(value, name=name, **condition_kwargs)
def pmf(self, value, name="pmf", **condition_kwargs):
"""Probability mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if `is_continuous`.
"""
warnings.warn("Please use `prob` instead of `pmf`. `pmf` will be "
"deprecated in December 2016.",
PendingDeprecationWarning)
if self.is_continuous:
raise TypeError("pmf is undefined for continuous distributions.")
return self.prob(value, name=name, **condition_kwargs)
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
(values or []) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape(self, sample_shape):
"""Helper to `sample` which ensures sample_shape is 1D."""
sample_shape_static_val = tensor_util.constant_value(sample_shape)
ndims = sample_shape.get_shape().ndims
if sample_shape_static_val is None:
if ndims is None or not sample_shape.get_shape().is_fully_defined():
ndims = array_ops.rank(sample_shape)
expanded_shape = distribution_util.pick_vector(
math_ops.equal(ndims, 0),
np.array((1,), dtype=dtypes.int32.as_numpy_dtype()),
array_ops.shape(sample_shape))
sample_shape = array_ops.reshape(sample_shape, expanded_shape)
total = math_ops.reduce_prod(sample_shape) # reduce_prod([]) == 1
else:
if ndims is None:
raise ValueError(
"Shouldn't be here; ndims cannot be none when we have a "
"tf.constant shape.")
if ndims == 0:
sample_shape_static_val = np.reshape(sample_shape_static_val, [1])
sample_shape = ops.convert_to_tensor(
sample_shape_static_val,
dtype=dtypes.int32,
name="sample_shape")
total = np.prod(sample_shape_static_val,
dtype=dtypes.int32.as_numpy_dtype())
return sample_shape, total
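  # Normalization sketch: a scalar `sample_shape` n becomes the 1-D vector
  # [n], so 5 -> ([5], total=5) and [3, 4] -> ([3, 4], total=12).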
def _is_scalar_helper(self, static_shape_fn, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape_fn().ndims is not None:
return static_shape_fn().ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape()[0].value is not None):
# If the static_shape_fn is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
|
AndreasMadsen/tensorflow
|
tensorflow/contrib/distributions/python/ops/distribution.py
|
Python
|
apache-2.0
| 35,525
|
[
"Gaussian"
] |
04e67117adc8fd3452783a6ea842e9b6b74b16135e3beaf4055208b2e45e4dd8
|
"""The ants module provides basic functions for interfacing with ants functions.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from builtins import range
from ..base import TraitedSpec, File, traits, InputMultiPath
from .base import ANTSCommand, ANTSCommandInputSpec
import os
from ..traits_extension import isdefined
class ANTSInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(3, 2, argstr='%d', usedefault=False,
position=1, desc='image dimension (2 or 3)')
fixed_image = InputMultiPath(File(exists=True), mandatory=True,
desc=('image to apply transformation to (generally a coregistered '
'functional)'))
moving_image = InputMultiPath(File(exists=True), argstr='%s',
mandatory=True,
desc=('image to apply transformation to (generally a coregistered '
'functional)'))
    # Not all metrics are appropriate for all modalities. Also, not all metrics
    # are efficient or appropriate at all resolution levels: some metrics perform
    # well for gross global registration but do poorly for small changes (e.g.
    # 'Mattes'), and some metrics do well for small changes but don't work well
    # for gross-level changes (e.g. 'CC').
    #
    # This is a two-stage registration. In the first stage
    #   [ 'Mattes', .................]
    #      ^^^^^^ <- First stage
    # do a unimodal registration of the first elements of the fixed/moving
    # input list, using 'Mattes' as the metric.
    #
    # In the second stage
    #   [ ....., ['Mattes','CC'] ]
    #            ^^^^^^^^^^^^^^^ <- Second stage
    # do a multi-modal registration where the first elements of the
    # fixed/moving input list use the 'CC' metric, and that result is added to
    # the 'Mattes' metric result of the second elements of the fixed/moving
    # input.
    #
    # Cost = Sum_i ( metric_weight[i] * Metric_i( fixed_image[i], moving_image[i] ) )
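    # Worked example of the cost line above (weights hypothetical):
    #   metric=['Mattes', 'CC'], metric_weight=[0.5, 0.5]
    #   => Cost = 0.5*Mattes(fixed_image[0], moving_image[0])
    #           + 0.5*CC(fixed_image[1], moving_image[1])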
metric = traits.List(traits.Enum('CC', 'MI', 'SMI', 'PR', 'SSD',
'MSQ', 'PSE'), mandatory=True, desc='')
metric_weight = traits.List(traits.Float(), requires=['metric'], desc='')
radius = traits.List(traits.Int(), requires=['metric'], desc='')
output_transform_prefix = traits.Str('out', usedefault=True,
argstr='--output-naming %s',
mandatory=True, desc='')
transformation_model = traits.Enum('Diff', 'Elast', 'Exp', 'Greedy Exp',
'SyN', argstr='%s', mandatory=True,
desc='')
gradient_step_length = traits.Float(
requires=['transformation_model'], desc='')
number_of_time_steps = traits.Float(
requires=['gradient_step_length'], desc='')
delta_time = traits.Float(requires=['number_of_time_steps'], desc='')
symmetry_type = traits.Float(requires=['delta_time'], desc='')
use_histogram_matching = traits.Bool(
argstr='%s', default=True, usedefault=True)
number_of_iterations = traits.List(
traits.Int(), argstr='--number-of-iterations %s', sep='x')
smoothing_sigmas = traits.List(
traits.Int(), argstr='--gaussian-smoothing-sigmas %s', sep='x')
subsampling_factors = traits.List(
traits.Int(), argstr='--subsampling-factors %s', sep='x')
affine_gradient_descent_option = traits.List(traits.Float(), argstr='%s')
mi_option = traits.List(traits.Int(), argstr='--MI-option %s', sep='x')
regularization = traits.Enum('Gauss', 'DMFFD', argstr='%s', desc='')
regularization_gradient_field_sigma = traits.Float(
requires=['regularization'], desc='')
regularization_deformation_field_sigma = traits.Float(
requires=['regularization'], desc='')
number_of_affine_iterations = traits.List(
traits.Int(), argstr='--number-of-affine-iterations %s', sep='x')
class ANTSOutputSpec(TraitedSpec):
affine_transform = File(exists=True, desc='Affine transform file')
warp_transform = File(exists=True, desc='Warping deformation field')
inverse_warp_transform = File(
exists=True, desc='Inverse warping deformation field')
metaheader = File(exists=True, desc='VTK metaheader .mhd file')
metaheader_raw = File(exists=True, desc='VTK metaheader .raw file')
class ANTS(ANTSCommand):
"""
Examples
--------
>>> from nipype.interfaces.ants import ANTS
>>> ants = ANTS()
>>> ants.inputs.dimension = 3
>>> ants.inputs.output_transform_prefix = 'MY'
>>> ants.inputs.metric = ['CC']
>>> ants.inputs.fixed_image = ['T1.nii']
>>> ants.inputs.moving_image = ['resting.nii']
>>> ants.inputs.metric_weight = [1.0]
>>> ants.inputs.radius = [5]
>>> ants.inputs.transformation_model = 'SyN'
>>> ants.inputs.gradient_step_length = 0.25
>>> ants.inputs.number_of_iterations = [50, 35, 15]
>>> ants.inputs.use_histogram_matching = True
>>> ants.inputs.mi_option = [32, 16000]
>>> ants.inputs.regularization = 'Gauss'
>>> ants.inputs.regularization_gradient_field_sigma = 3
>>> ants.inputs.regularization_deformation_field_sigma = 0
>>> ants.inputs.number_of_affine_iterations = [10000,10000,10000,10000,10000]
>>> ants.cmdline
'ANTS 3 --MI-option 32x16000 --image-metric CC[ T1.nii, resting.nii, 1, 5 ] --number-of-affine-iterations \
10000x10000x10000x10000x10000 --number-of-iterations 50x35x15 --output-naming MY --regularization Gauss[3.0,0.0] \
--transformation-model SyN[0.25] --use-Histogram-Matching 1'
"""
_cmd = 'ANTS'
input_spec = ANTSInputSpec
output_spec = ANTSOutputSpec
def _image_metric_constructor(self):
retval = []
intensity_based = ['CC', 'MI', 'SMI', 'PR', 'SSD', 'MSQ']
point_set_based = ['PSE', 'JTB']
for ii in range(len(self.inputs.moving_image)):
if self.inputs.metric[ii] in intensity_based:
retval.append(
'--image-metric %s[ %s, %s, %g, %d ]' % (self.inputs.metric[ii],
self.inputs.fixed_image[
ii],
self.inputs.moving_image[
ii],
self.inputs.metric_weight[
ii],
self.inputs.radius[ii]))
            elif self.inputs.metric[ii] in point_set_based:
pass
# retval.append('--image-metric %s[%s, %s, ...'.format(self.inputs.metric[ii],
# self.inputs.fixed_image[ii], self.inputs.moving_image[ii], ...))
return ' '.join(retval)
def _transformation_constructor(self):
model = self.inputs.transformation_model
step_length = self.inputs.gradient_step_length
time_step = self.inputs.number_of_time_steps
delta_time = self.inputs.delta_time
symmetry_type = self.inputs.symmetry_type
retval = ['--transformation-model %s' % model]
parameters = []
for elem in (step_length, time_step, delta_time, symmetry_type):
if elem is not traits.Undefined:
parameters.append('%#.2g' % elem)
if len(parameters) > 0:
if len(parameters) > 1:
parameters = ','.join(parameters)
else:
parameters = ''.join(parameters)
retval.append('[%s]' % parameters)
return ''.join(retval)
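    # e.g. transformation_model='SyN', gradient_step_length=0.25 yields
    # '--transformation-model SyN[0.25]' (cf. the ANTS doctest above).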
def _regularization_constructor(self):
return '--regularization {0}[{1},{2}]'.format(self.inputs.regularization,
self.inputs.regularization_gradient_field_sigma,
self.inputs.regularization_deformation_field_sigma)
def _affine_gradient_descent_option_constructor(self):
values = self.inputs.affine_gradient_descent_option
defaults = [0.1, 0.5, 1.e-4, 1.e-4]
for ii in range(len(defaults)):
try:
defaults[ii] = values[ii]
except IndexError:
break
parameters = self._format_xarray([('%g' % defaults[index]) for index in range(4)])
retval = ['--affine-gradient-descent-option', parameters]
return ' '.join(retval)
def _format_arg(self, opt, spec, val):
if opt == 'moving_image':
return self._image_metric_constructor()
elif opt == 'transformation_model':
return self._transformation_constructor()
elif opt == 'regularization':
return self._regularization_constructor()
elif opt == 'affine_gradient_descent_option':
return self._affine_gradient_descent_option_constructor()
elif opt == 'use_histogram_matching':
if self.inputs.use_histogram_matching:
return '--use-Histogram-Matching 1'
else:
return '--use-Histogram-Matching 0'
return super(ANTS, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['affine_transform'] = os.path.abspath(
self.inputs.output_transform_prefix + 'Affine.txt')
outputs['warp_transform'] = os.path.abspath(
self.inputs.output_transform_prefix + 'Warp.nii.gz')
outputs['inverse_warp_transform'] = os.path.abspath(
self.inputs.output_transform_prefix + 'InverseWarp.nii.gz')
# outputs['metaheader'] = os.path.abspath(self.inputs.output_transform_prefix + 'velocity.mhd')
# outputs['metaheader_raw'] = os.path.abspath(self.inputs.output_transform_prefix + 'velocity.raw')
return outputs
class RegistrationInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(3, 2, argstr='--dimensionality %d',
usedefault=True, desc='image dimension (2 or 3)')
fixed_image = InputMultiPath(File(exists=True), mandatory=True,
desc='image to apply transformation to (generally a coregistered functional)')
fixed_image_mask = File(argstr='%s', exists=True,
desc='mask used to limit metric sampling region of the fixed image')
moving_image = InputMultiPath(File(exists=True), mandatory=True,
desc='image to apply transformation to (generally a coregistered functional)')
moving_image_mask = File(requires=['fixed_image_mask'],
exists=True, desc='mask used to limit metric sampling region of the moving image')
save_state = File(argstr='--save-state %s', exists=False,
desc='Filename for saving the internal restorable state of the registration')
restore_state = File(argstr='--restore-state %s', exists=True,
desc='Filename for restoring the internal restorable state of the registration')
initial_moving_transform = File(argstr='%s', exists=True, desc='',
xor=['initial_moving_transform_com'])
invert_initial_moving_transform = traits.Bool(
default=False, requires=["initial_moving_transform"], usedefault=True,
desc='', xor=['initial_moving_transform_com'])
initial_moving_transform_com = traits.Enum(0, 1, 2, argstr='%s',
default=0, xor=['initial_moving_transform'],
desc="Use center of mass for moving transform")
metric_item_trait = traits.Enum("CC", "MeanSquares", "Demons", "GC", "MI",
"Mattes")
metric_stage_trait = traits.Either(
metric_item_trait, traits.List(metric_item_trait))
metric = traits.List(metric_stage_trait, mandatory=True,
desc='the metric(s) to use for each stage. '
'Note that multiple metrics per stage are not supported '
'in ANTS 1.9.1 and earlier.')
metric_weight_item_trait = traits.Float(1.0)
metric_weight_stage_trait = traits.Either(
metric_weight_item_trait, traits.List(metric_weight_item_trait))
metric_weight = traits.List(
metric_weight_stage_trait, value=[1.0], usedefault=True,
requires=['metric'], mandatory=True,
desc='the metric weight(s) for each stage. '
'The weights must sum to 1 per stage.')
radius_bins_item_trait = traits.Int(5)
radius_bins_stage_trait = traits.Either(
radius_bins_item_trait, traits.List(radius_bins_item_trait))
radius_or_number_of_bins = traits.List(
radius_bins_stage_trait, value=[5], usedefault=True,
requires=['metric_weight'],
desc='the number of bins in each stage for the MI and Mattes metric, '
'the radius for other metrics')
sampling_strategy_item_trait = traits.Enum(
"None", "Regular", "Random", None)
sampling_strategy_stage_trait = traits.Either(
sampling_strategy_item_trait, traits.List(sampling_strategy_item_trait))
sampling_strategy = traits.List(
trait=sampling_strategy_stage_trait, requires=['metric_weight'],
desc='the metric sampling strategy (strategies) for each stage')
sampling_percentage_item_trait = traits.Either(
traits.Range(low=0.0, high=1.0), None)
sampling_percentage_stage_trait = traits.Either(
sampling_percentage_item_trait, traits.List(sampling_percentage_item_trait))
sampling_percentage = traits.List(
trait=sampling_percentage_stage_trait, requires=['sampling_strategy'],
desc="the metric sampling percentage(s) to use for each stage")
use_estimate_learning_rate_once = traits.List(traits.Bool(), desc='')
use_histogram_matching = traits.Either(
traits.Bool, traits.List(traits.Bool(argstr='%s')),
default=True, usedefault=True)
interpolation = traits.Enum(
'Linear', 'NearestNeighbor', 'CosineWindowedSinc', 'WelchWindowedSinc',
'HammingWindowedSinc', 'LanczosWindowedSinc', 'BSpline', 'MultiLabel', 'Gaussian',
argstr='%s', usedefault=True)
interpolation_parameters = traits.Either(traits.Tuple(traits.Int()), # BSpline (order)
traits.Tuple(traits.Float(), # Gaussian/MultiLabel (sigma, alpha)
traits.Float())
)
write_composite_transform = traits.Bool(
argstr='--write-composite-transform %d',
default=False, usedefault=True, desc='')
collapse_output_transforms = traits.Bool(
argstr='--collapse-output-transforms %d', default=True,
usedefault=True, # This should be true for explicit completeness
desc=('Collapse output transforms. Specifically, enabling this option '
'combines all adjacent linear transforms and composes all '
'adjacent displacement field transforms before writing the '
'results to disk.'))
initialize_transforms_per_stage = traits.Bool(
argstr='--initialize-transforms-per-stage %d', default=False,
usedefault=True, # This should be true for explicit completeness
desc=('Initialize linear transforms from the previous stage. By enabling this option, '
              'the current linear stage transform is directly initialized from the previous '
              "stage's linear transform; this allows multiple linear stages to be run where "
'each stage directly updates the estimated linear transform from the previous '
'stage. (e.g. Translation -> Rigid -> Affine). '
))
# NOTE: Even though only 0=False and 1=True are allowed, ants uses integer
# values instead of booleans
float = traits.Bool(
argstr='--float %d', default=False,
desc='Use float instead of double for computations.')
transforms = traits.List(traits.Enum('Rigid', 'Affine', 'CompositeAffine',
'Similarity', 'Translation', 'BSpline',
'GaussianDisplacementField', 'TimeVaryingVelocityField',
'TimeVaryingBSplineVelocityField', 'SyN', 'BSplineSyN',
'Exponential', 'BSplineExponential'), argstr='%s', mandatory=True)
# TODO: input checking and allow defaults
# All parameters must be specified for BSplineDisplacementField, TimeVaryingBSplineVelocityField, BSplineSyN,
# Exponential, and BSplineExponential. EVEN DEFAULTS!
transform_parameters = traits.List(traits.Either(traits.Tuple(traits.Float()), # Translation, Rigid, Affine,
# CompositeAffine, Similarity
traits.Tuple(traits.Float(), # GaussianDisplacementField, SyN
traits.Float(),
traits.Float()
),
traits.Tuple(traits.Float(), # BSplineSyn,
traits.Int(), # BSplineDisplacementField,
traits.Int(), # TimeVaryingBSplineVelocityField
traits.Int()
),
traits.Tuple(traits.Float(), # TimeVaryingVelocityField
traits.Int(),
traits.Float(),
traits.Float(),
traits.Float(),
traits.Float()
),
traits.Tuple(traits.Float(), # Exponential
traits.Float(),
traits.Float(),
traits.Int()
),
traits.Tuple(traits.Float(), # BSplineExponential
traits.Int(),
traits.Int(),
traits.Int(),
traits.Int()
),
)
)
# Convergence flags
number_of_iterations = traits.List(traits.List(traits.Int()))
smoothing_sigmas = traits.List(traits.List(traits.Float()), mandatory=True)
sigma_units = traits.List(traits.Enum('mm', 'vox'),
requires=['smoothing_sigmas'],
desc="units for smoothing sigmas")
shrink_factors = traits.List(traits.List(traits.Int()), mandatory=True)
convergence_threshold = traits.List(trait=traits.Float(), value=[
1e-6], minlen=1, requires=['number_of_iterations'], usedefault=True)
convergence_window_size = traits.List(trait=traits.Int(), value=[
10], minlen=1, requires=['convergence_threshold'], usedefault=True)
# Output flags
output_transform_prefix = traits.Str(
"transform", usedefault=True, argstr="%s", desc="")
output_warped_image = traits.Either(
traits.Bool, File(), hash_files=False, desc="")
output_inverse_warped_image = traits.Either(traits.Bool, File(),
hash_files=False,
requires=['output_warped_image'], desc="")
winsorize_upper_quantile = traits.Range(
        low=0.0, high=1.0, value=1.0, argstr='%s', usedefault=True, desc="The upper quantile to clip image ranges")
winsorize_lower_quantile = traits.Range(
        low=0.0, high=1.0, value=0.0, argstr='%s', usedefault=True, desc="The lower quantile to clip image ranges")
class RegistrationOutputSpec(TraitedSpec):
forward_transforms = traits.List(
File(exists=True), desc='List of output transforms for forward registration')
reverse_transforms = traits.List(
File(exists=True), desc='List of output transforms for reverse registration')
forward_invert_flags = traits.List(traits.Bool(
), desc='List of flags corresponding to the forward transforms')
reverse_invert_flags = traits.List(traits.Bool(
), desc='List of flags corresponding to the reverse transforms')
composite_transform = File(exists=True, desc='Composite transform file')
inverse_composite_transform = File(desc='Inverse composite transform file')
warped_image = File(desc="Outputs warped image")
inverse_warped_image = File(desc="Outputs the inverse of the warped image")
save_state = File(desc="The saved registration state to be restored")
class Registration(ANTSCommand):
"""
Examples
--------
>>> import copy, pprint
>>> from nipype.interfaces.ants import Registration
>>> reg = Registration()
>>> reg.inputs.fixed_image = 'fixed1.nii'
>>> reg.inputs.moving_image = 'moving1.nii'
>>> reg.inputs.output_transform_prefix = "output_"
>>> reg.inputs.initial_moving_transform = 'trans.mat'
>>> reg.inputs.invert_initial_moving_transform = True
>>> reg.inputs.transforms = ['Affine', 'SyN']
>>> reg.inputs.transform_parameters = [(2.0,), (0.25, 3.0, 0.0)]
>>> reg.inputs.number_of_iterations = [[1500, 200], [100, 50, 30]]
>>> reg.inputs.dimension = 3
>>> reg.inputs.write_composite_transform = True
>>> reg.inputs.collapse_output_transforms = False
>>> reg.inputs.initialize_transforms_per_stage = False
>>> reg.inputs.metric = ['Mattes']*2
>>> reg.inputs.metric_weight = [1]*2 # Default (value ignored currently by ANTs)
>>> reg.inputs.radius_or_number_of_bins = [32]*2
>>> reg.inputs.sampling_strategy = ['Random', None]
>>> reg.inputs.sampling_percentage = [0.05, None]
>>> reg.inputs.convergence_threshold = [1.e-8, 1.e-9]
>>> reg.inputs.convergence_window_size = [20]*2
>>> reg.inputs.smoothing_sigmas = [[1,0], [2,1,0]]
>>> reg.inputs.sigma_units = ['vox'] * 2
>>> reg.inputs.shrink_factors = [[2,1], [3,2,1]]
>>> reg.inputs.use_estimate_learning_rate_once = [True, True]
>>> reg.inputs.use_histogram_matching = [True, True] # This is the default
>>> reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
>>> reg1 = copy.deepcopy(reg)
>>> reg1.inputs.winsorize_lower_quantile = 0.025
>>> reg1.cmdline
'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \
--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \
--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \
--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \
--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \
--metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \
--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \
--use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1'
>>> reg1.run() # doctest: +SKIP
>>> reg2 = copy.deepcopy(reg)
>>> reg2.inputs.winsorize_upper_quantile = 0.975
>>> reg2.cmdline
'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \
--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \
--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \
--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \
--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \
--metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \
--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \
--use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1'
>>> reg3 = copy.deepcopy(reg)
>>> reg3.inputs.winsorize_lower_quantile = 0.025
>>> reg3.inputs.winsorize_upper_quantile = 0.975
>>> reg3.cmdline
'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \
--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \
--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \
--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \
--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \
--metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \
--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \
--use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1'
>>> reg3a = copy.deepcopy(reg)
>>> reg3a.inputs.float = True
>>> reg3a.cmdline
'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 1 \
--initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear \
--output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \
--metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \
--smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \
--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \
--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \
--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \
--write-composite-transform 1'
>>> reg3b = copy.deepcopy(reg)
>>> reg3b.inputs.float = False
>>> reg3b.cmdline
'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 0 \
--initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear \
--output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \
--metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \
--smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \
--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \
--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \
--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \
--write-composite-transform 1'
>>> # Test collapse transforms flag
>>> reg4 = copy.deepcopy(reg)
>>> reg4.inputs.save_state = 'trans.mat'
>>> reg4.inputs.restore_state = 'trans.mat'
>>> reg4.inputs.initialize_transforms_per_stage = True
>>> reg4.inputs.collapse_output_transforms = True
>>> outputs = reg4._list_outputs()
>>> pprint.pprint(outputs) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
{'composite_transform': '.../nipype/testing/data/output_Composite.h5',
'forward_invert_flags': [],
'forward_transforms': [],
'inverse_composite_transform': '.../nipype/testing/data/output_InverseComposite.h5',
'inverse_warped_image': <undefined>,
'reverse_invert_flags': [],
'reverse_transforms': [],
'save_state': '.../nipype/testing/data/trans.mat',
'warped_image': '.../nipype/testing/data/output_warped_image.nii.gz'}
>>> reg4.cmdline
'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \
--initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \
--restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] \
--metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \
--smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \
--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \
--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \
--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \
--write-composite-transform 1'
>>> # Test collapse transforms flag with individual (non-composite) transform outputs
>>> reg4b = copy.deepcopy(reg4)
>>> reg4b.inputs.write_composite_transform = False
>>> outputs = reg4b._list_outputs()
>>> pprint.pprint(outputs) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
{'composite_transform': <undefined>,
'forward_invert_flags': [False, False],
'forward_transforms': ['.../nipype/testing/data/output_0GenericAffine.mat',
'.../nipype/testing/data/output_1Warp.nii.gz'],
'inverse_composite_transform': <undefined>,
'inverse_warped_image': <undefined>,
'reverse_invert_flags': [True, False],
 'reverse_transforms': ['.../nipype/testing/data/output_0GenericAffine.mat',
'.../nipype/testing/data/output_1InverseWarp.nii.gz'],
'save_state': '.../nipype/testing/data/trans.mat',
'warped_image': '.../nipype/testing/data/output_warped_image.nii.gz'}
>>> reg4b.aggregate_outputs() # doctest: +SKIP
>>> reg4b.cmdline
'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \
--initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \
--restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] \
--metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \
--smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \
--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \
--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \
--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \
--write-composite-transform 0'
>>> # Test multiple metrics per stage
>>> reg5 = copy.deepcopy(reg)
>>> reg5.inputs.fixed_image = 'fixed1.nii'
>>> reg5.inputs.moving_image = 'moving1.nii'
>>> reg5.inputs.metric = ['Mattes', ['Mattes', 'CC']]
>>> reg5.inputs.metric_weight = [1, [.5,.5]]
>>> reg5.inputs.radius_or_number_of_bins = [32, [32, 4] ]
>>> reg5.inputs.sampling_strategy = ['Random', None] # use default strategy in second stage
>>> reg5.inputs.sampling_percentage = [0.05, [0.05, 0.10]]
>>> reg5.cmdline
'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \
--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \
--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \
--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \
--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \
--metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] \
--metric CC[ fixed1.nii, moving1.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] \
--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \
--use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
>>> # Test multiple inputs
>>> reg6 = copy.deepcopy(reg5)
>>> reg6.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii']
>>> reg6.inputs.moving_image = ['moving1.nii', 'moving2.nii']
>>> reg6.cmdline
'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \
--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \
--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \
--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \
--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \
--metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] \
--metric CC[ fixed2.nii, moving2.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] \
--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \
--use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
>>> # Test Interpolation Parameters (BSpline)
>>> reg7a = copy.deepcopy(reg)
>>> reg7a.inputs.interpolation = 'BSpline'
>>> reg7a.inputs.interpolation_parameters = (3,)
>>> reg7a.cmdline
'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \
--initialize-transforms-per-stage 0 --interpolation BSpline[ 3 ] --output [ output_, output_warped_image.nii.gz ] \
--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \
--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \
--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \
--metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \
--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \
--use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
>>> # Test Interpolation Parameters (MultiLabel/Gaussian)
>>> reg7b = copy.deepcopy(reg)
>>> reg7b.inputs.interpolation = 'Gaussian'
>>> reg7b.inputs.interpolation_parameters = (1.0, 1.0)
>>> reg7b.cmdline
'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \
--initialize-transforms-per-stage 0 --interpolation Gaussian[ 1.0, 1.0 ] \
--output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \
--metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \
--smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \
--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \
--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \
--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \
--write-composite-transform 1'
>>> # Test Extended Transform Parameters
>>> reg8 = copy.deepcopy(reg)
>>> reg8.inputs.transforms = ['Affine', 'BSplineSyN']
>>> reg8.inputs.transform_parameters = [(2.0,), (0.25, 26, 0, 3)]
>>> reg8.cmdline
'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \
--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \
--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \
--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \
--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform BSplineSyN[ 0.25, 26, 0, 3 ] \
--metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \
--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \
--use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
"""
DEF_SAMPLING_STRATEGY = 'None'
"""The default sampling strategy argument."""
_cmd = 'antsRegistration'
input_spec = RegistrationInputSpec
output_spec = RegistrationOutputSpec
_quantilesDone = False
_linear_transform_names = ['Rigid', 'Affine', 'Translation', 'CompositeAffine', 'Similarity']
def _format_metric(self, index):
"""
Format the antsRegistration -m metric argument(s).
Parameters
----------
index: the stage index
"""
# The metric name input for the current stage.
name_input = self.inputs.metric[index]
# The stage-specific input dictionary.
stage_inputs = dict(
fixed_image=self.inputs.fixed_image[0],
moving_image=self.inputs.moving_image[0],
metric=name_input,
weight=self.inputs.metric_weight[index],
            radius_or_bins=self.inputs.radius_or_number_of_bins[index]
        )
# The optional sampling strategy and percentage.
if isdefined(self.inputs.sampling_strategy) and self.inputs.sampling_strategy:
sampling_strategy = self.inputs.sampling_strategy[index]
if sampling_strategy:
stage_inputs['sampling_strategy'] = sampling_strategy
if isdefined(self.inputs.sampling_percentage) and self.inputs.sampling_percentage:
sampling_percentage = self.inputs.sampling_percentage[index]
if sampling_percentage:
stage_inputs['sampling_percentage'] = sampling_percentage
# Make a list of metric specifications, one per -m command line
# argument for the current stage.
# If there are multiple inputs for this stage, then convert the
# dictionary of list inputs into a list of metric specifications.
# Otherwise, make a singleton list of the metric specification
# from the non-list inputs.
if isinstance(name_input, list):
items = list(stage_inputs.items())
indexes = list(range(0, len(name_input)))
specs = list()
for i in indexes:
temp = dict([(k, v[i]) for k, v in items])
if len(self.inputs.fixed_image) == 1:
temp["fixed_image"] = self.inputs.fixed_image[0]
else:
temp["fixed_image"] = self.inputs.fixed_image[i]
if len(self.inputs.moving_image) == 1:
temp["moving_image"] = self.inputs.moving_image[0]
else:
temp["moving_image"] = self.inputs.moving_image[i]
specs.append(temp)
else:
specs = [stage_inputs]
# Format the --metric command line metric arguments, one per
# specification.
return [self._format_metric_argument(**spec) for spec in specs]
@staticmethod
def _format_metric_argument(**kwargs):
retval = '%s[ %s, %s, %g, %d' % (kwargs['metric'],
kwargs['fixed_image'],
kwargs['moving_image'],
kwargs['weight'],
kwargs['radius_or_bins'])
# The optional sampling strategy.
if 'sampling_strategy' in kwargs:
sampling_strategy = kwargs['sampling_strategy']
elif 'sampling_percentage' in kwargs:
# The sampling percentage is specified but not the
# sampling strategy. Use the default strategy.
sampling_strategy = Registration.DEF_SAMPLING_STRATEGY
else:
sampling_strategy = None
# Format the optional sampling arguments.
if sampling_strategy:
retval += ', %s' % sampling_strategy
if 'sampling_percentage' in kwargs:
retval += ', %g' % kwargs['sampling_percentage']
retval += ' ]'
return retval
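    # Illustrative only (values taken from the doctest above): calling
    # _format_metric_argument(metric='Mattes', fixed_image='fixed1.nii',
    #     moving_image='moving1.nii', weight=1, radius_or_bins=32,
    #     sampling_strategy='Random', sampling_percentage=0.05)
    # returns 'Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ]'.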
def _format_transform(self, index):
retval = []
retval.append('%s[ ' % self.inputs.transforms[index])
parameters = ', '.join([str(
element) for element in self.inputs.transform_parameters[index]])
retval.append('%s' % parameters)
retval.append(' ]')
return "".join(retval)
def _format_registration(self):
retval = []
for ii in range(len(self.inputs.transforms)):
retval.append('--transform %s' % (self._format_transform(ii)))
for metric in self._format_metric(ii):
retval.append('--metric %s' % metric)
retval.append('--convergence %s' % self._format_convergence(ii))
if isdefined(self.inputs.sigma_units):
retval.append('--smoothing-sigmas %s%s' %
(self._format_xarray(self.inputs.smoothing_sigmas[ii]),
self.inputs.sigma_units[ii]))
else:
retval.append('--smoothing-sigmas %s' %
self._format_xarray(self.inputs.smoothing_sigmas[ii]))
retval.append('--shrink-factors %s' %
self._format_xarray(self.inputs.shrink_factors[ii]))
if isdefined(self.inputs.use_estimate_learning_rate_once):
retval.append('--use-estimate-learning-rate-once %d' %
self.inputs.use_estimate_learning_rate_once[ii])
if isdefined(self.inputs.use_histogram_matching):
# use_histogram_matching is either a common flag for all transforms
# or a list of transform-specific flags
if isinstance(self.inputs.use_histogram_matching, bool):
histval = self.inputs.use_histogram_matching
else:
histval = self.inputs.use_histogram_matching[ii]
retval.append('--use-histogram-matching %d' % histval)
return " ".join(retval)
def _get_outputfilenames(self, inverse=False):
output_filename = None
if not inverse:
if isdefined(self.inputs.output_warped_image) and \
self.inputs.output_warped_image:
output_filename = self.inputs.output_warped_image
if isinstance(output_filename, bool):
output_filename = '%s_Warped.nii.gz' % self.inputs.output_transform_prefix
return output_filename
inv_output_filename = None
if isdefined(self.inputs.output_inverse_warped_image) and \
self.inputs.output_inverse_warped_image:
inv_output_filename = self.inputs.output_inverse_warped_image
if isinstance(inv_output_filename, bool):
inv_output_filename = '%s_InverseWarped.nii.gz' % self.inputs.output_transform_prefix
return inv_output_filename
def _format_convergence(self, ii):
convergence_iter = self._format_xarray(self.inputs.number_of_iterations[ii])
if len(self.inputs.convergence_threshold) > ii:
convergence_value = self.inputs.convergence_threshold[ii]
else:
convergence_value = self.inputs.convergence_threshold[0]
if len(self.inputs.convergence_window_size) > ii:
convergence_ws = self.inputs.convergence_window_size[ii]
else:
convergence_ws = self.inputs.convergence_window_size[0]
return '[ %s, %g, %d ]' % (convergence_iter, convergence_value, convergence_ws)
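    # Illustrative only: with number_of_iterations=[[1500, 200]],
    # convergence_threshold=[1e-08] and convergence_window_size=[20],
    # _format_convergence(0) returns '[ 1500x200, 1e-08, 20 ]', as seen in the
    # doctest above (assuming _format_xarray joins elements with 'x').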
def _format_winsorize_image_intensities(self):
if not self.inputs.winsorize_upper_quantile > self.inputs.winsorize_lower_quantile:
raise RuntimeError("Upper bound MUST be more than lower bound: %g > %g"
% (self.inputs.winsorize_upper_quantile, self.inputs.winsorize_lower_quantile))
self._quantilesDone = True
return '--winsorize-image-intensities [ %s, %s ]' % (self.inputs.winsorize_lower_quantile,
self.inputs.winsorize_upper_quantile)
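    # Illustrative only: with winsorize_lower_quantile=0.025 and
    # winsorize_upper_quantile=0.975 this returns
    # '--winsorize-image-intensities [ 0.025, 0.975 ]' (see the doctest above);
    # _format_arg then emits an empty string for the second quantile option.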
def _format_arg(self, opt, spec, val):
if opt == 'fixed_image_mask':
if isdefined(self.inputs.moving_image_mask):
return '--masks [ %s, %s ]' % (self.inputs.fixed_image_mask,
self.inputs.moving_image_mask)
else:
return '--masks %s' % self.inputs.fixed_image_mask
elif opt == 'transforms':
return self._format_registration()
elif opt == 'initial_moving_transform':
try:
do_invert_transform = int(self.inputs.invert_initial_moving_transform)
except ValueError:
do_invert_transform = 0 # Just do the default behavior
return '--initial-moving-transform [ %s, %d ]' % (self.inputs.initial_moving_transform,
do_invert_transform)
elif opt == 'initial_moving_transform_com':
try:
do_center_of_mass_init = int(self.inputs.initial_moving_transform_com)
except ValueError:
do_center_of_mass_init = 0 # Just do the default behavior
return '--initial-moving-transform [ %s, %s, %d ]' % (self.inputs.fixed_image[0],
self.inputs.moving_image[0],
do_center_of_mass_init)
elif opt == 'interpolation':
if self.inputs.interpolation in ['BSpline', 'MultiLabel', 'Gaussian'] and \
isdefined(self.inputs.interpolation_parameters):
return '--interpolation %s[ %s ]' % (self.inputs.interpolation,
', '.join([str(param)
for param in self.inputs.interpolation_parameters]))
else:
return '--interpolation %s' % self.inputs.interpolation
elif opt == 'output_transform_prefix':
out_filename = self._get_outputfilenames(inverse=False)
inv_out_filename = self._get_outputfilenames(inverse=True)
if out_filename and inv_out_filename:
return '--output [ %s, %s, %s ]' % (self.inputs.output_transform_prefix,
out_filename,
inv_out_filename)
elif out_filename:
return '--output [ %s, %s ]' % (self.inputs.output_transform_prefix,
out_filename)
else:
return '--output %s' % self.inputs.output_transform_prefix
elif opt == 'winsorize_upper_quantile' or opt == 'winsorize_lower_quantile':
if not self._quantilesDone:
return self._format_winsorize_image_intensities()
else:
self._quantilesDone = False
return '' # Must return something for argstr!
# This feature was removed from recent versions of antsRegistration due to corrupt outputs.
# elif opt == 'collapse_linear_transforms_to_fixed_image_header':
# return self._formatCollapseLinearTransformsToFixedImageHeader()
return super(Registration, self)._format_arg(opt, spec, val)
def _output_filenames(self, prefix, count, transform, inverse=False):
self.low_dimensional_transform_map = {'Rigid': 'Rigid.mat',
'Affine': 'Affine.mat',
'GenericAffine': 'GenericAffine.mat',
'CompositeAffine': 'Affine.mat',
'Similarity': 'Similarity.mat',
'Translation': 'Translation.mat',
'BSpline': 'BSpline.txt',
'Initial': 'DerivedInitialMovingTranslation.mat'}
if transform in list(self.low_dimensional_transform_map.keys()):
suffix = self.low_dimensional_transform_map[transform]
inverse_mode = inverse
else:
            inverse_mode = False  # These are not analytically invertible
if inverse:
suffix = 'InverseWarp.nii.gz'
else:
suffix = 'Warp.nii.gz'
return '%s%d%s' % (prefix, count, suffix), inverse_mode
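    # Illustrative only: _output_filenames('output_', 1, 'SyN', inverse=True)
    # returns ('output_1InverseWarp.nii.gz', False) -- SyN is not in the
    # low-dimensional map, so it is not treated as analytically invertible;
    # compare the reverse_transforms entries in the doctest above.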
def _list_outputs(self):
outputs = self._outputs().get()
outputs['forward_transforms'] = []
outputs['forward_invert_flags'] = []
outputs['reverse_transforms'] = []
outputs['reverse_invert_flags'] = []
# invert_initial_moving_transform should be always defined, even if
# there's no initial transform
invert_initial_moving_transform = False
if isdefined(self.inputs.invert_initial_moving_transform):
invert_initial_moving_transform = self.inputs.invert_initial_moving_transform
if self.inputs.write_composite_transform:
filename = self.inputs.output_transform_prefix + 'Composite.h5'
outputs['composite_transform'] = os.path.abspath(filename)
filename = self.inputs.output_transform_prefix + \
'InverseComposite.h5'
outputs['inverse_composite_transform'] = os.path.abspath(filename)
        else:  # If composite transforms are written, then individuals are not written (as of 2014-10-26)
if not self.inputs.collapse_output_transforms:
transform_count = 0
if isdefined(self.inputs.initial_moving_transform):
outputs['forward_transforms'].append(self.inputs.initial_moving_transform)
outputs['forward_invert_flags'].append(invert_initial_moving_transform)
outputs['reverse_transforms'].insert(0, self.inputs.initial_moving_transform)
outputs['reverse_invert_flags'].insert(0, not invert_initial_moving_transform) # Prepend
transform_count += 1
elif isdefined(self.inputs.initial_moving_transform_com):
forward_filename, forward_inversemode = self._output_filenames(
self.inputs.output_transform_prefix,
transform_count,
'Initial')
reverse_filename, reverse_inversemode = self._output_filenames(
self.inputs.output_transform_prefix,
transform_count,
'Initial',
True)
outputs['forward_transforms'].append(os.path.abspath(forward_filename))
outputs['forward_invert_flags'].append(False)
outputs['reverse_transforms'].insert(0,
os.path.abspath(reverse_filename))
outputs['reverse_invert_flags'].insert(0, True)
transform_count += 1
for count in range(len(self.inputs.transforms)):
forward_filename, forward_inversemode = self._output_filenames(
self.inputs.output_transform_prefix, transform_count,
self.inputs.transforms[count])
reverse_filename, reverse_inversemode = self._output_filenames(
self.inputs.output_transform_prefix, transform_count,
self.inputs.transforms[count], True)
outputs['forward_transforms'].append(os.path.abspath(forward_filename))
outputs['forward_invert_flags'].append(forward_inversemode)
outputs['reverse_transforms'].insert(0, os.path.abspath(reverse_filename))
outputs['reverse_invert_flags'].insert(0, reverse_inversemode)
transform_count += 1
else:
transform_count = 0
is_linear = [t in self._linear_transform_names for t in self.inputs.transforms]
collapse_list = []
if isdefined(self.inputs.initial_moving_transform) or \
isdefined(self.inputs.initial_moving_transform_com):
is_linear.insert(0, True)
# Only files returned by collapse_output_transforms
if any(is_linear):
collapse_list.append('GenericAffine')
if not all(is_linear):
collapse_list.append('SyN')
for transform in collapse_list:
forward_filename, forward_inversemode = self._output_filenames(
self.inputs.output_transform_prefix,
transform_count,
transform,
inverse=False)
reverse_filename, reverse_inversemode = self._output_filenames(
self.inputs.output_transform_prefix,
transform_count,
transform,
inverse=True)
outputs['forward_transforms'].append(os.path.abspath(forward_filename))
outputs['forward_invert_flags'].append(forward_inversemode)
outputs['reverse_transforms'].append(os.path.abspath(reverse_filename))
outputs['reverse_invert_flags'].append(reverse_inversemode)
transform_count += 1
out_filename = self._get_outputfilenames(inverse=False)
inv_out_filename = self._get_outputfilenames(inverse=True)
if out_filename:
outputs['warped_image'] = os.path.abspath(out_filename)
if inv_out_filename:
outputs['inverse_warped_image'] = os.path.abspath(inv_out_filename)
        if isdefined(self.inputs.save_state) and self.inputs.save_state:
outputs['save_state'] = os.path.abspath(self.inputs.save_state)
return outputs
|
iglpdc/nipype
|
nipype/interfaces/ants/registration.py
|
Python
|
bsd-3-clause
| 55,946
|
[
"Gaussian",
"VTK"
] |
dac3135269c121e2005993fbca52c24d81db169683b9527cac7d42d343c911a1
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
def join_path(prefix, *args):
path = str(prefix)
for elt in args:
path = os.path.join(path, str(elt))
return path
def ancestor(dir, n=1):
"""Get the nth ancestor of a directory."""
parent = os.path.abspath(dir)
for i in range(n):
parent = os.path.dirname(parent)
return parent
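# Illustrative only (hypothetical paths):
#   join_path('/usr', 'local', 'lib')  -> '/usr/local/lib'
#   ancestor('/usr/local/lib', 2)      -> '/usr'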
|
andysim/psi4
|
psi4/driver/util/filesystem.py
|
Python
|
gpl-2.0
| 1,284
|
[
"Psi4"
] |
85767998df72d607daf981239b360982f2cb954d1169ca3082d338eaedebe5be
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Chris Synan & Dataworlds LLC
# Portions copyright (c) 2012 Stephen P. Smith
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The software is free for non-commercial uses. Commercial uses of this software
# or any derivative must obtain a license from Dataworlds LLC (Austin TX)
# In addition, the above copyright notice and this permission notice shall
# be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, os, time, shutil, logging, logging.handlers, traceback
import threading, subprocess, requests
import urllib3.contrib.pyopenssl
import multiprocessing
from multiprocessing import Process, Pipe, Queue, Value, current_process
from subprocess import Popen, PIPE, call, signal
from datetime import datetime
from shutil import copy2
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
import web, random, json, atexit
#from pid import pidpy as PIDController
import RPi.GPIO as GPIO
from lcd import lcddriver
import glob
from PID_SM import PID as pidsm
# logging.basicConfig()
logger = logging.getLogger('ispresso')
# REMOTE DEBUG -- TODO: Remove this before going to production
# import rpdb2
# rpdb2.start_embedded_debugger('funkymonkey', fAllowRemote = True)
gpio_heat = 24
gpio_pump = 23
gpio_btn_heat_led = 8
gpio_btn_heat_sig = 7
gpio_btn_pump_led = 10
gpio_btn_pump_sig = 9
gpio_btn_brew_pump_sig = 5
gpio_btn_steam_pump_sig = 6
gpio_btn_steam_switch_sig = 19
gpio_btn_pwr_switch_sig = 13
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(gpio_btn_heat_sig, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(gpio_btn_pump_sig, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(gpio_btn_brew_pump_sig, GPIO.IN, pull_up_down=GPIO.PUD_UP) #, pull_up_down=GPIO.PUD_DOWN
GPIO.setup(gpio_btn_steam_pump_sig, GPIO.IN, pull_up_down=GPIO.PUD_UP) #, pull_up_down=GPIO.PUD_DOWN
GPIO.setup(gpio_btn_steam_switch_sig, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) #, pull_up_down=GPIO.PUD_DOWN
GPIO.setup(gpio_btn_pwr_switch_sig, GPIO.IN) #, pull_up_down=GPIO.PUD_DOWN
GPIO.setup(gpio_heat, GPIO.OUT)
GPIO.setup(gpio_pump, GPIO.OUT)
GPIO.setup(gpio_btn_heat_led, GPIO.OUT)
GPIO.setup(gpio_btn_pump_led, GPIO.OUT)
def logger_init():
logger.setLevel(logging.DEBUG)
log_file_size = 1024 * 1024 * 1 # 1 MB
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(process)d - %(name)s : %(message)s')
fh = logging.handlers.RotatingFileHandler('/var/log/ispresso.log', maxBytes=log_file_size, backupCount=5)
fh.setFormatter(formatter)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
logger.info('******************************************')
logger.info('Starting up...')
def initialize():
settings.load()
if setup.wifi_connect() == False: # this needs to happen after lcd pipe is set up
logger.warn("WiFi can't connect to internet. Entering Smart Connect mode. Connect to iSPRESSO wireless network.")
mem.lcd_connection.send(["iSPRESSO WiFi", "Access Point", 0])
setup.smart_connect()
else:
logger.info("WiFi connection looks ok")
mem.lcd_connection.send(["iSPRESSO", "WiFi OK", 1])
mem.lcd_connection.send(["iSPRESSO", "", 0])
class mem: # global class
cache_day = None
cache_start_time = None
cache_end_time = None
heat_connection = Pipe()
lcd_connection = Pipe()
brew_connection = Pipe()
cloud_connection = Pipe()
flag_pump_on = False
sched_flag_on = False
sched_flag_off = False
time_heat_button_pressed = time.time()
scheduler_enabled = True
presoak_time = 3
wait_time = 2
brew_time = 25
one_wire = None
flag_brewSwitch_on = False
flag_steamSwitch_on = False
flag_steam_mode = False
flag_pwr_on = False
class globalvars(object):
def __init__(self, initval = 0):
self.temperature = multiprocessing.Value("i", initval)
def set_temp(self, n=0):
with self.temperature.get_lock():
self.temperature.value = n
@property
def temp(self):
with self.temperature.get_lock():
return self.temperature.value
class param:
mode = "off"
cycle_time = 1.0
duty_cycle = 0.0
set_point = 198
set_point_steam = 180
k_param = 5.8 # was 6
i_param = 24 # was 120
d_param = 6 # was 5
brew_k_param = 1 # was 6
brew_i_param = 0.0001 # was 120
brew_d_param = 100
brew_flag = False
steam_flag = False
brewdata={}
def add_global_hook(parent_conn, statusQ):
# mem.heat_connection = parent_conn
g = web.storage({"parent_conn" : parent_conn, "statusQ" : statusQ})
def _wrapper(handler):
web.ctx.globals = g
return handler()
return _wrapper
class advanced:
def __init__(self):
self.mode = param.mode
self.cycle_time = param.cycle_time
self.duty_cycle = param.duty_cycle
self.set_point = param.set_point
self.set_point_steam = param.set_point_steam
self.k_param = param.k_param
self.i_param = param.i_param
self.d_param = param.d_param
self.brew_k_param = param.brew_k_param
self.brew_i_param = param.brew_i_param
self.brew_d_param = param.brew_d_param
def GET(self):
return render.advanced(self.mode, self.set_point, self.set_point_steam, self.duty_cycle, self.cycle_time,
self.k_param, self.i_param, self.d_param, self.brew_k_param, self.brew_i_param, self.brew_d_param)
def POST(self):
data = web.data()
datalist = data.split("&")
for item in datalist:
datalistkey = item.split("=")
if datalistkey[0] == "mode":
self.mode = datalistkey[1]
if datalistkey[0] == "setpoint":
self.set_point = float(datalistkey[1])
if datalistkey[0] == "setpointsteam":
self.set_point_steam = float(datalistkey[1])
if datalistkey[0] == "dutycycle":
self.duty_cycle = float(datalistkey[1])
if datalistkey[0] == "cycletime":
self.cycle_time = float(datalistkey[1])
if datalistkey[0] == "k":
self.k_param = float(datalistkey[1])
if datalistkey[0] == "i":
self.i_param = float(datalistkey[1])
if datalistkey[0] == "d":
self.d_param = float(datalistkey[1])
if datalistkey[0] == "brew_k":
self.brew_k_param = float(datalistkey[1])
if datalistkey[0] == "brew_i":
self.brew_i_param = float(datalistkey[1])
if datalistkey[0] == "brew_d":
self.brew_d_param = float(datalistkey[1])
param.mode = self.mode
param.cycle_time = self.cycle_time
param.duty_cycle = self.duty_cycle
param.set_point = self.set_point
param.set_point_steam = self.set_point_steam
param.k_param = self.k_param
param.i_param = self.i_param
param.d_param = self.d_param
param.brew_k_param = self.brew_k_param
param.brew_i_param = self.brew_i_param
param.brew_d_param = self.brew_d_param
settings.save()
web.ctx.globals.parent_conn.send([self.mode, self.cycle_time, self.duty_cycle, self.set_point, self.set_point_steam,
self.k_param, self.i_param, self.d_param, self.brew_k_param, self.brew_i_param, self.brew_d_param,
False, param.steam_flag, param.brew_flag])
#mono and sensor for single color
def gettempProc(global_vars, conn):
p = current_process()
logger = logging.getLogger('ispresso').getChild("getTempProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
try:
while (True):
t = time.time()
            time.sleep(.1)  # brief pause; the ~0.8 s 1-wire read dominates the loop period
num = tempdata()
elapsed = "%.2f" % (time.time() - t)
conn.send([num, elapsed])
fah = (9.0 / 5.0) * num + 32
global_vars.set_temp(int(fah)) # convert to int before storing the global var
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
def getonofftime(cycle_time, duty_cycle):
duty = duty_cycle / 100.0
#logger.debug("duty="+str(duty)+ " Duty_cycle="+str(duty_cycle))
on_time = cycle_time * (duty)
off_time = cycle_time * (1.0 - duty)
return [on_time, off_time]
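# Illustrative only: getonofftime(1.0, 65) returns approximately [0.65, 0.35],
# i.e. a 65% duty cycle over a 1-second cycle holds the heater on for 0.65 s
# and off for 0.35 s.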
def tellHeatProc(heat_mode=None, flush_cache=None, duty_cycle = None):
if flush_cache is None:
flush_cache = False
if heat_mode is not None:
param.mode = heat_mode
if duty_cycle is not None:
param.duty_cycle = duty_cycle
#logger.debug("told duty cycle: " + str(duty_cycle))
mem.heat_connection.send([param.mode, param.cycle_time, param.duty_cycle, param.set_point, param.set_point_steam, param.k_param, param.i_param, param.d_param,
param.brew_k_param, param.brew_i_param, param.brew_d_param,flush_cache, param.steam_flag, param.brew_flag])
def heatProc(cycle_time, duty_cycle, conn):
p = current_process()
logger = logging.getLogger('ispresso').getChild("heatProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
try:
while (True):
while (conn.poll()): # get last
cycle_time, duty_cycle = conn.recv()
conn.send([cycle_time, duty_cycle])
if duty_cycle == 0:
GPIO.output(gpio_heat, GPIO.LOW)
time.sleep(cycle_time)
elif duty_cycle == 100:
GPIO.output(gpio_heat, GPIO.HIGH)
time.sleep(cycle_time)
else:
on_time, off_time = getonofftime(cycle_time, duty_cycle)
GPIO.output(gpio_heat, GPIO.HIGH)
time.sleep(on_time)
GPIO.output(gpio_heat, GPIO.LOW)
time.sleep(off_time)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
def lcdControlProc(lcd_child_conn):
p = current_process()
logger = logging.getLogger("ispresso").getChild("lcdControlProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
lcd = lcddriver.lcd()
last_line1 = ""
last_line2 = ""
while (True):
time.sleep(0.25)
while lcd_child_conn.poll():
try:
line1, line2, duration = lcd_child_conn.recv()
if line1 is not None:
if last_line1 != line1:
time.sleep(0.01)
lcd.lcd_display_string(line1.ljust(16), 1)
last_line1 = line1
time.sleep(duration)
if line2 is not None:
if last_line2 != line2:
time.sleep(0.01)
lcd.lcd_display_string(line2.ljust(16), 2)
last_line2 = line2
time.sleep(duration)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
subprocess.call(['i2cdetect', '-y', '1'])
try:
lcd = None
time.sleep(0.1)
lcd = lcddriver.lcd()
time.sleep(0.1)
except:
logger.error("Trying to re-initialize the LCD by nulling it out and re-instantiating. Couldln't pull it off :(")
continue
def brewControlProc(brew_child_conn):
p = current_process()
logger = logging.getLogger("ispresso").getChild("brewControlProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
try:
mem.flag_pump_on = False
button_bounce_threshold_secs = 1
while(True):
time_button_pushed, brew_plan = brew_child_conn.recv() # BLOCKS until something shows up
mem.flag_pump_on = True
for listitem in brew_plan:
if mem.flag_pump_on == False:
while brew_child_conn.poll(): # clear out anything other button presses in the queue
brew_child_conn.recv()
break
action = listitem[0]
duration = listitem[1]
counter = 0
start_time = time.time()
if action.upper() in ("PRESOAK", "BREW"):
GPIO.output(gpio_btn_pump_led, GPIO.HIGH)
GPIO.output(gpio_pump, GPIO.HIGH)
                    while (counter < duration) and mem.flag_pump_on:  # might not need the check for flag_pump_on here, as it's checked above
time.sleep(0.1)
if brew_child_conn.poll(): # mem.brew_connection.poll() returns TRUE or FALSE immediately and does NOT block
time_button_pushed_again, throwaway_brew_plan = brew_child_conn.recv() # get item off the list, check how long since time_button_pushed, against button_bounce_threshold_secs. If too short, clean up and exit this loop
if time_button_pushed_again - time_button_pushed > button_bounce_threshold_secs:
GPIO.output(gpio_pump, GPIO.LOW)
GPIO.output(gpio_btn_pump_led, GPIO.LOW)
mem.flag_pump_on = False
mem.lcd_connection.send([None, "", 0])
break
if (time.time() - start_time) >= counter:
counter = counter + 1
message = action + 'ing ' + str(duration - counter) + 's'
mem.lcd_connection.send([None, message, 0])
logger.debug(message)
GPIO.output(gpio_pump, GPIO.LOW)
GPIO.output(gpio_btn_pump_led, GPIO.LOW)
mem.lcd_connection.send([None, '', 0])
while brew_child_conn.poll(): # clear out anything other button presses in the queue
brew_child_conn.recv()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
finally:
GPIO.output(gpio_pump, GPIO.LOW)
GPIO.output(gpio_btn_pump_led, GPIO.LOW)
def brewTimerProc(brewTimer_child_conn):
p = current_process()
logger.info('Starting:' + p.name + ":" + str(p.pid))
try:
param.brew_flag = False
button_bounce_threshold_secs = 0.75
heat_boost_time = 3
rebound_protect_time = 1
while(True):
time_button_pushed = brewTimer_child_conn.recv() # BLOCKS until something shows up
time.sleep(button_bounce_threshold_secs)
if GPIO.input(gpio_btn_brew_pump_sig) == GPIO.LOW:
logger.debug("not a bounce, brew high")
param.brew_flag = True
brew_time = 0
                while param.brew_flag:  # might not need the check for flag_pump_on here, as it's checked above
#time.sleep(0.1)
brew_time= time.time()-time_button_pushed
message = ("Brewing %.1f s" % brew_time)
mem.lcd_connection.send([None, message, 0])
if brew_time < heat_boost_time:
duty_cycle=85
tellHeatProc("manual",None,duty_cycle)#turn heater on manual 100# duty cycle to help out the PID process
#logger.debug("boost time duty cycle" + str(param.duty_cycle))
else:
duty_cycle = 65
tellHeatProc("auto",None, duty_cycle)# back to auto mode
#tellHeatProc("manual", None, duty_cycle)
if brewTimer_child_conn.poll(): # mem.brew_connection.poll() returns TRUE or FALSE immediately and does NOT block
time_button_pushed_again = brewTimer_child_conn.recv() # get item off the list, check how long since time_button_pushed, against button_bounce_threshold_secs. If too short, clean up and exit this loop
if time_button_pushed_again - time_button_pushed > button_bounce_threshold_secs:
tellHeatProc("auto")
mem.lcd_connection.send([None, "", 0])
message = ("Brewed %.1f s" % brew_time)
mem.lcd_connection.send([None, message, 3])
logger.debug(message)
mem.lcd_connection.send([None,"",0])
tellHeatProc("off",None, 0)
param.brew_flag= False
time.sleep(rebound_protect_time) #protect from rebound heating of empty heater
tellHeatProc("auto", None, 0)
else:
logger.debug("Brew Button Bounced")
#break
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
finally:
logger.info("brewed for " + str(brew_time))
#mem.flag_pump_on=False
#GPIO.output(gpio_pump, GPIO.LOW)
#GPIO.output(gpio_btn_pump_led, GPIO.LOW)
def tempControlProc(global_vars, mode, cycle_time, duty_cycle, set_point, set_point_steam, k_param, i_param, d_param, brew_k_param, brew_i_param, brew_d_param, statusQ, conn, steam_flag, brew_flag):
p = current_process()
logger = logging.getLogger('ispresso').getChild("tempControlProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
try:
parent_conn_temp, child_conn_temp = Pipe()
ptemp = Process(name="gettempProc", target=gettempProc, args=(global_vars, child_conn_temp,))
ptemp.daemon = True
ptemp.start()
parent_conn_heat, child_conn_heat = Pipe()
pheat = Process(name="heatProc", target=heatProc, args=(cycle_time, duty_cycle, child_conn_heat))
pheat.daemon = True
pheat.start()
pid=pidsm(cycle_time, k_param, i_param, d_param)
#pid = PIDController.pidpy(cycle_time, k_param, i_param, d_param) # init pid
flush_cache = False
last_temp_C = 0
brew_temps = []
brew_pressures = []
pressure = 0
was_brew = False
overtemp = False
while (True):
#time.sleep(0.1)
#pid = PIDController.pidpy(cycle_time, k_param, i_param, d_param) # init pid
#logger.debug("cycle time: " + str(cycle_time) + " k: "+str(k_param)+" i: "+str(i_param)+" d: "+ str(d_param))
readytemp = False
while parent_conn_temp.poll():
temp_C, elapsed = parent_conn_temp.recv() # non blocking receive
                mode = scheduled_mode(mode)  # check to see if scheduler should fire on or off -- MOVING THIS as the OFF doesn't seem to fire otherwise
if temp_C > 0: # the 1-wire sensor sometimes comes back with 0 -- need to fix that by holding on to last value.
last_temp_C = temp_C
else:
temp_C = last_temp_C
temp_F = (9.0 / 5.0) * temp_C + 32
temp_C_str = "%3.2f" % temp_C
temp_F_str = "%3.2f" % temp_F
temp_F_str2 = "%3.1f" % temp_F
temp_F_pretty = "%3.0f" % temp_F
mem.lcd_connection.send(['Probably ' + str(temp_F_str2) + ' F', None, 0])
readytemp = True
#logger.debug("Temp F: "+ temp_F_pretty)
if temp_F > 260: #over temperature sensing
readytemp = False
duty_cycle = 0
#mode = "off"
#tellHeatProc(mode)
parent_conn_heat.send([cycle_time, duty_cycle])
GPIO.output(gpio_btn_heat_led, GPIO.LOW)
logger.error("Temperature exceeding sensor range. Shut off")
mem.lcd_connection.send([None,"Oh No! Too Hot!",0])
if (not statusQ.full()):
statusQ.put([temp_F_str, elapsed, mode, cycle_time, duty_cycle, set_point, set_point_steam,
k_param, i_param, d_param, brew_k_param, brew_i_param, brew_d_param]) # GET request
overtemp = True
else: overtemp = False
if overtemp == False:
while parent_conn_heat.poll(): # non blocking receive
cycle_time, duty_cycle = parent_conn_heat.recv()
while conn.poll(): # POST settings
mode, cycle_time, duty_cycle_temp, set_point, set_point_steam, k_param, i_param, d_param, brew_k_param, brew_i_param, brew_d_param, flush_cache, steam_flag, brew_flag = conn.recv()
if not brew_flag:
if steam_flag == True and mode == "auto":
mem.lcd_connection.send([None, 'Mode: Steam', 0])
set_point = set_point_steam
elif (mode == "auto" or mode == "manual"):
mem.lcd_connection.send([None, 'Mode: Brew', 0])
else:
mem.lcd_connection.send([None, 'Mode: OFF',0])
if readytemp == True:
if temp_F < (set_point-15): #use different PID parameters for turn-on
k_param_init = 1.7
i_param_init = 0
d_param_init = 7.98
pid.SetTunings(k_param_init, i_param_init, d_param_init)
elif brew_flag == True:
time_now = time.time()
if brew_flag and not was_brew:
start_time=time_now
brew_data = {'times': [],
'temperatures':[],
'pressures':[]
}
pid.SetTunings(brew_k_param, brew_i_param, brew_d_param)
brew_data['times'].append(time_now-start_time)
brew_data['temperatures'].append(temp_F)
brew_data['pressures'].append(pressure)
was_brew = True
else:
pid.SetTunings(k_param, i_param, d_param)
if mode == "auto":
pid.SetMode(mode,duty_cycle)
duty_cycle = pid.compute(temp_F, set_point)
parent_conn_heat.send([cycle_time, duty_cycle])
GPIO.output(gpio_btn_heat_led, GPIO.HIGH)
elif mode == "off":
duty_cycle = 0
parent_conn_heat.send([cycle_time, duty_cycle])
GPIO.output(gpio_btn_heat_led, GPIO.LOW)
pid.SetMode(mode, duty_cycle)
elif mode == "manual":
duty_cycle = duty_cycle_temp
parent_conn_heat.send([cycle_time, duty_cycle])
pid.SetMode(mode,duty_cycle)
if was_brew and not brew_flag:
#save to array for Dan's code
if max(brew_data['times'])>15:
logger.debug("time to send all the data")
brew_data['brewed_at'] = int(round(time.time()))
brew_data['brew_style_id'] = 1 #brew style espresso
t = threading.Thread(target = sendToPercolate, args = (brew_data,))
t.start()
was_brew = False
if (not statusQ.full()):
statusQ.put([temp_F_str, elapsed, mode, cycle_time, duty_cycle, set_point, set_point_steam, k_param, i_param, d_param,
param.brew_k_param, param.brew_i_param, param.brew_d_param,]) # GET request
                    readytemp = False
if flush_cache:
mem.cache_day = None # this should force cache flush
flush_cache = False
mode = scheduled_mode(mode) # check to see if scheduler should fire on or off
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
def sendToPercolate(brewData):
logger = logging.getLogger('ispresso').getChild("sendToPercolate")
logger.debug("got to the percolate part")
try:
requests.post(url = 'http://192.168.1.202/brews/espresso',data = json.dumps(brewData))
r=requests.post(url = 'http://192.168.1.22:8000/api/brews', json = brewData)
logger.debug(str(r.status_code)+ " " + str(r.text))
logger.debug("job well done, dan")
except:
#logger.debug("something went wrong with POST. Probably Dan's fault")
logger.debug("error sending data to server. Probably Dan's fault")
class brewdata:
def __init__(self):
pass
def GET(self):
return json.dumps(param.brewdata)
def POST(self):
data = web.data() # web.input gives back a Storage < > thing
param.brewdata = json.loads(data)
class getstatus:
def __init__(self):
pass
def GET(self): # blocking receive
if (statusQ.full()): # remove old data
for i in range(statusQ.qsize()):
temp, elapsed, mode, cycle_time, duty_cycle, set_point, set_point_steam, k_param, i_param, d_param, brew_k_param, brew_i_param, brew_d_param = web.ctx.globals.statusQ.get()
temp, elapsed, mode, cycle_time, duty_cycle, set_point, set_point_steam, k_param, i_param, d_param, brew_k_param, brew_i_param, brew_d_param = web.ctx.globals.statusQ.get()
out = json.dumps({"temp" : temp, "elapsed" : elapsed, "mode" : mode, "cycle_time" : cycle_time, "duty_cycle" : duty_cycle,
"set_point" : set_point, "set_point_steam" : set_point_steam, "k_param" : k_param, "i_param" : i_param, "d_param" : d_param,
"brew_k_param" : brew_k_param, "brew_i_param" : brew_i_param, "brew_d_param" : brew_d_param, "pump" : mem.flag_pump_on})
return out
def POST(self):
pass
@staticmethod
def get_temp():
if (statusQ.full()): # remove old data
for i in range(statusQ.qsize()):
temp, elapsed, mode, cycle_time, duty_cycle, set_point, set_point_steam, k_param, i_param, d_param, brew_k_param, brew_i_param, brew_d_param = web.ctx.globals.statusQ.get()
temp, elapsed, mode, cycle_time, duty_cycle, set_point, set_point_steam, k_param, i_param, d_param, brew_k_param, brew_i_param, brew_d_param = web.ctx.globals.statusQ.get()
out = json.dumps({"temp" : temp, "elapsed" : elapsed, "mode" : mode, "cycle_time" : cycle_time, "duty_cycle" : duty_cycle,
"set_point" : set_point, "set_point_steam" : set_point_steam, "k_param" : k_param, "i_param" : i_param, "d_param" : d_param,
"brew_k_param" : brew_k_param, "brew_i_param" : brew_i_param, "brew_d_param" : brew_d_param, "pump" : mem.flag_pump_on})
return out["temp"]
class settings:
def GET(self):
with open("settings.json") as f:
filecontents = json.load(f)
return render.settings(json.dumps(filecontents)) # a JSON object (string) at this point
def POST(self):
data = web.data() # web.input gives back a Storage < > thing
mydata = json.loads(data)
for datalistkey in mydata:
logger.debug("datalistkey = " + str(datalistkey))
if datalistkey == "temp":
param.set_point = int(mydata[datalistkey])
logger.debug("temp changed to " + str(mydata[datalistkey]))
if datalistkey == "brewSecs":
mem.brew_time = int(mydata[datalistkey])
logger.debug("brew secs changed")
if datalistkey == "soakSecs":
mem.presoak_time = int(mydata[datalistkey])
logger.debug("soak secs changed")
if datalistkey == "waitSecs":
mem.wait_time = int(mydata[datalistkey])
logger.debug("wait secs changed")
if datalistkey == "stemp":
param.set_point_steam = int(mydata[datalistkey])
logger.debug("Steam temp changed to " + str(mydata[datalistkey]))
logger.debug("Settings updated: " + str(mydata))
settings.save()
@staticmethod
def load():
with open("settings.json") as loadFile:
my_settings = json.load(loadFile)
mem.brew_time = my_settings["brewSecs"]
mem.presoak_time = my_settings["soakSecs"]
mem.wait_time = my_settings["waitSecs"]
param.set_point = my_settings["temp"]
param.set_point_steam = my_settings["stemp"]
param.k_param = my_settings["p_value"]
param.i_param = my_settings["i_value"]
param.d_param = my_settings["d_value"]
param.brew_k_param = my_settings["brew_p_value"]
param.brew_i_param = my_settings["brew_i_value"]
param.brew_d_param = my_settings["brew_d_value"]
@staticmethod
def save():
with open("settings.json") as saveFile:
my_settings = json.load(saveFile)
my_settings['brewSecs'] = mem.brew_time
my_settings['soakSecs'] = mem.presoak_time
my_settings['waitSecs'] = mem.wait_time
my_settings['temp'] = param.set_point
my_settings['stemp'] = param.set_point_steam
my_settings['p_value'] = param.k_param
my_settings['i_value'] = param.i_param
my_settings['d_value'] = param.d_param
my_settings['brew_p_value'] = param.brew_k_param
my_settings['brew_i_value'] = param.brew_i_param
my_settings['brew_d_value'] = param.brew_d_param
logger.debug("About to save settings = " + str(my_settings))
with open("settings.json", "wb") as output_file:
json.dump(my_settings, output_file)
class ispresso:
def __init__(self):
self.mode = param.mode
self.cycle_time = param.cycle_time
self.duty_cycle = param.duty_cycle
self.set_point = param.set_point
self.set_point_steam = param.set_point_steam
self.k_param = param.k_param
self.i_param = param.i_param
self.d_param = param.d_param
self.brew_k_param = param.brew_k_param
self.brew_i_param = param.brew_i_param
self.brew_d_param = param.brew_d_param
def GET(self):
#return render.ispresso()
return render.ispresso(self.mode, self.set_point, self.set_point_steam, self.duty_cycle, self.cycle_time,
self.k_param, self.i_param, self.d_param, self.brew_k_param, self.brew_i_param, self.brew_d_param)
def POST(self):
op = ""
flag = ""
data = web.data()
datalist = data.split("&")
for item in datalist:
datalistkey = item.split("=")
if datalistkey[0] == "operation":
op = datalistkey[1]
if datalistkey[0] == "flag":
flag = datalistkey[1]
if str(op).upper() == "HEAT":
if flag == "on":
tellHeatProc("auto")
else:
tellHeatProc("off")
elif str(op).upper() == "PUMP":
time_stamp = time.time()
brew_plan = [['Presoak', mem.presoak_time], ['Wait', mem.wait_time], ['Brew', mem.brew_time]]
logger.debug("Caught POST, Pump button. brewing ... " + str(brew_plan))
mem.brew_connection.send([time_stamp, brew_plan])
def brew(self):
time_stamp = time.time()
brew_plan = [['Presoak', mem.presoak_time], ['Wait', mem.wait_time], ['Brew', mem.brew_time]]
logger.debug("called brew method ... " + str(brew_plan))
mem.brew_connection.send([time_stamp, brew_plan])
def scheduled_mode(old_mode):
try:
now = datetime.now()
today = datetime.isoweekday(datetime.now())
if today == 7:
today = 0
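        # datetime.isoweekday() numbers Monday=1 .. Sunday=7, while the
        # schedule.json 'days' array (cf. the week dict in the schedule class)
        # is indexed Sunday=0 .. Saturday=6, hence the remap above.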
if mem.cache_day is None or mem.cache_day != today: # refresh cache, reset flags, turn off heat
logger.debug("scheduled_mode: cache flush or new day. resetting flags, turning off heat.")
mem.cache_day = today
mem.sched_flag_off = False
mem.sched_flag_on = False
with open("schedule.json") as f:
my_schedule = json.load(f) # t= time.strptime("00:05:42.244", "%H:%M:%S")
mem.cache_start_time = my_schedule['days'][today]['time']['startTime']
mem.cache_start_time = now.replace(hour=int(mem.cache_start_time.split(":")[0]), minute=int(mem.cache_start_time.split(":")[1]))
mem.cache_end_time = my_schedule['days'][today]['time']['endTime']
mem.cache_end_time = now.replace(hour=int(mem.cache_end_time.split(":")[0]), minute=int(mem.cache_end_time.split(":")[1]))
return "off"
if now < mem.cache_start_time:
return old_mode
if now > mem.cache_start_time and now < mem.cache_end_time:
if mem.sched_flag_on:
return old_mode
else: # start flag NOT set
mem.sched_flag_on = True # set flag
logger.debug("scheduled_mode: going AUTO")
return "auto"
if now > mem.cache_end_time:
if mem.sched_flag_off:
return old_mode
else: # end flag NOT set
mem.sched_flag_off = True # set end flag
logger.debug("scheduled_mode: going OFF")
return "off"
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
class setup:
def GET(self):
try:
mySsidList = setup.get_ssid_list()
return render.setup(mySsidList)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
def POST(self): # catch the inputs, put them into a config file, then call a shell script
try:
input = web.input()
protocol = input.protocol
ssid = input.ssid
passwd = input.passwd
if protocol == "personal":
logger.debug("doing config for WPA personal. ssid = " + ssid)
with open('/var/www/setup/interfaces_default', 'r') as file:
lines = file.readlines()
for idx, line in enumerate(lines):
if line.find("wpa-ssid") > -1:
lines[idx] = ' wpa-ssid "' + ssid + '"\n'
if line.find("wpa-psk") > -1:
lines[idx] = ' wpa-psk "' + passwd + '"\n'
if line.find("pre-up") > -1:
lines[idx] = ' # pre-up wpa_supplicant \n'
if line.find("post-down") > -1:
lines[idx] = ' # post-down # wpa_supplicant \n'
with open('/var/www/setup/ssid/' + ssid + '/interfaces', 'w') as file:
file.writelines(lines)
subprocess.call("/var/www/setup/default.sh 2>&1 >> /var/log/smartconnect.log", shell=True) # , Shell=True
elif protocol == "enterprise":
mycert = web.input(ca_cert={})
filename = ""
filedir = '/etc/certs/' # change this to the directory you want to store the file in.
if 'ca_cert' in mycert: # to check if the file-object is created
filepath = mycert.ca_cert.filename.replace('\\', '/') # replaces the windows-style slashes with linux ones.
filename = filepath.split('/')[-1] # splits the and chooses the last part (the filename with extension)
filename = filedir + filename # put together with my path
fout = open(filename, 'w') # creates the file where the uploaded file should be stored
fout.write(mycert.ca_cert.file.read()) # writes the uploaded file to the newly created file.
fout.close() # closes the file, upload complete.
logger.debug("SETUP: Enterprise - cert file written: " + filename)
with open ('/var/www/setup/interfaces_default', 'r') as file:
lines = file.readlines()
for idx, line in enumerate(lines):
if line.find("wpa-ssid") > -1:
lines[idx] = ' wpa-ssid "' + ssid + '"\n'
if line.find("wpa-psk") > -1:
lines[idx] = '# wpa-psk \n' # commenting out the PSK line for Enterprise, we're going to do wpa-supplicant instead
if line.find("pre-up") > -1:
lines[idx] = ' pre-up wpa_supplicant -B -Dwext -i wlan0 -c/etc/wpa_supplicant/wpa_supplicant.conf -f /var/log/wpa_supplicant.log \n'
if line.find("post-down") > -1:
lines[idx] = ' post-down killall -q wpa_supplicant \n'
with open('/var/www/setup/ssid/' + ssid + '/interfaces', 'w') as file:
file.writelines(lines)
with open ('/var/www/setup/wpa_supplicant.conf', 'r') as file:
lines = file.readlines()
for idx, line in enumerate(lines):
if line.find(" ssid") > -1: # need the trailing space so it doesnt squash scan_ssid field
lines[idx] = ' ssid="' + ssid + '"\n'
if line.find("key_mgmt") > -1:
lines[idx] = ' key_mgmt=' + input.key_mgmt + '\n'
if line.find("pairwise") > -1:
lines[idx] = ' pairwise=' + input.pairwise + '\n'
if line.find("group") > -1:
lines[idx] = ' group=' + input.group + '\n'
if line.find("psk") > -1:
lines[idx] = ' psk="' + input.psk + '"\n'
if line.find("eap") > -1:
lines[idx] = ' eap=' + input.eap + '\n'
if line.find("identity") > -1:
lines[idx] = ' identity="' + input.identity + '"\n'
if line.find("password") > -1:
lines[idx] = ' password="' + passwd + '"\n'
if line.find("ca_cert=") > -1 : # need the trailing = so it doesn't squash ca_cert2 field
lines[idx] = ' ca_cert="' + filename + '"\n'
with open('/var/www/setup/ssid/' + ssid + '/wpa_supplicant.conf', 'w') as file:
file.writelines(lines)
subprocess.call("/var/www/setup/default.sh 2>&1 >> /var/log/smartconnect.log", shell=True) # , Shell=True
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
@staticmethod
def get_ssid_list():
try:
iwlist_cmd = "iwlist wlan0 scanning | grep ESSID"
proc = subprocess.Popen(iwlist_cmd, shell=True, stdout=subprocess.PIPE)
myNwList = []
while True:
line = proc.stdout.readline()
if line != '':
line = line[line.find('"') + 1 : len(line) - 2]
myNwList.append(line)
else:
break
return myNwList
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
def get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
@staticmethod
def check_connected(): # assumes we have a wifi configuration in place in /etc/network/interfaces and we want to test it
try:
url = "http://google.com"
response = requests.get(url)
return True
except:
pass
return False
@staticmethod
def wifi_connect():
try:
if setup.check_connected():
return True
my_ssid_list = setup.get_ssid_list()
my_subdir_list = setup.get_immediate_subdirectories("/var/www/setup/ssid/")
for ssid in my_ssid_list: # need to compare lists, and try out each one that matches
if ssid in my_subdir_list: # attempt connection - move file(s) into place, and recycle ifdown & ifup
logger.debug("wifi_connect: trying ssid = " + ssid)
shutil.copy2("/var/www/setup/ssid/" + ssid + "/interfaces", "/etc/network/interfaces")
if os.path.isfile("/var/www/setup/ssid/" + ssid + "/wpa_supplicant.conf"):
shutil.copy2("/var/www/setup/ssid/" + ssid + "/wpa_supplicant.conf", "/etc/wpa_supplicant/wpa_supplicant.conf")
my_cmd = "sudo ifdown wlan0 && sudo ifup wlan0"
proc = subprocess.Popen(my_cmd, shell=True, stdout=subprocess.PIPE)
                    if setup.check_connected():
return True
return False
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
@staticmethod
def smart_connect():
logger.debug("Calling SmartConnect setup.sh")
subprocess.call("/var/www/setup/smartconnect.sh 2>&1 >> /var/log/smartconnect.log", shell=True)
class schedule:
def GET(self):
with open("schedule.json") as f:
filecontents = json.load(f)
return render.schedule(json.dumps(filecontents), str(datetime.now())) # a JSON object (string) at this point
def POST(self):
data = web.data() # web.input gives back a Storage < > thing
mydata = json.loads(data)
with open("schedule.json") as f:
my_schedule = json.load(f)
week = {'Sunday':0, 'Monday':1, 'Tuesday':2, 'Wednesday':3, 'Thursday':4, 'Friday':5, 'Saturday':6}
my_schedule['days'][week[mydata['day']]]['time']['startTime'] = mydata['time']['startTime']
my_schedule['days'][week[mydata['day']]]['time']['endTime'] = mydata['time']['endTime']
tellHeatProc(None, True) # FLUSH the cache so that the other process picks up the changes
with open("schedule.json", "wb") as output_file:
json.dump(my_schedule, output_file)
return json.dumps("OK")
def tempdata():
try:
one_wire = mem.one_wire # gets set below, on init "/sys/bus/w1/devices/28-000004e0badb/w1_slave"
pipe = Popen(["cat", one_wire], stdout=PIPE)
result = pipe.communicate()[0]
result_list = result.split("=")
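# Typical w1_slave contents look like this (hypothetical DS18B20 reading, two lines):
#   4b 46 7f ff 02 10 1c : crc=1c YES
#   4b 46 7f ff 02 10 1c t=23062
# so splitting on "=" and taking the last field yields millidegrees Celsius.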
try:
temp_C = float(result_list[-1]) / 1000 # temp in Celsius
except ValueError: # probably means we can't read the 1-wire sensor
logger.warn('Could not get a value from 1-wire connector. Using ' + one_wire )
temp_C = 0
return temp_C
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
def catchButton(btn): # GPIO
try:
time.sleep(0.25)
#if GPIO.input(btn) != GPIO.HIGH: # check to see if the input button is still high, protect against EMI false positive
# return
if GPIO.input(gpio_btn_heat_sig) == GPIO.HIGH and GPIO.input(gpio_btn_pump_sig) == GPIO.HIGH: # both buttons pressed ('and', not bitwise '&', which binds tighter than '==')
mem.lcd_connection.send(["Live long", "and prosper!", 1]) # easter egg
mem.lcd_connection.send(["iSPRESSO", "", 0]) # easter egg
logger.info("You found an easter egg!")
return
if btn == gpio_btn_heat_sig:
now = time.time()
if now - mem.time_heat_button_pressed < 1:
mem.time_heat_button_pressed = now
return
mem.time_heat_button_pressed = now
if param.mode == "off":
GPIO.output(gpio_btn_heat_led, GPIO.HIGH) # this is a bit of a hack because the temp control also regulates the LED but putting it here gives better user experience.
logger.debug("catchButton: telling Heat Proc AUTO (ON) ")
tellHeatProc("auto")
else:
GPIO.output(gpio_btn_heat_led, GPIO.LOW)
logger.debug("catchButton: telling Heat Proc OFF")
tellHeatProc("off")
#elif btn == gpio_btn_pump_sig:
#if mem.flag_pwr_on == True:
#logger.debug("catchButton: telling Brew Proc (toggle)")
#time_stamp = time.time()
#brew_plan = [['Presoak', mem.presoak_time], ['Wait', mem.wait_time], ['Brew', mem.brew_time]]
#mem.brew_connection.send([time_stamp, brew_plan])
elif btn == gpio_btn_brew_pump_sig:
now = time.time()
logger.debug("catchButton: Brew pump switched")
time_stamp=time.time()
if param.mode == "auto":
mem.brewTimer_connection.send(time_stamp)
else:
mem.lcd_connection.send(["Machine Off", "Flip pwr to brew",3])
elif btn == gpio_btn_steam_pump_sig:
logger.debug("catchButton: steam pump switched")
time_stamp=time.time()
if GPIO.input(gpio_btn_steam_pump_sig)==GPIO.LOW:
if param.steam_flag == True:
tellHeatProc("auto")
if GPIO.input(gpio_btn_steam_pump_sig)==GPIO.HIGH:
tellHeatProc("auto")
elif btn == gpio_btn_steam_switch_sig:
logger.debug("catchButton: steam/hot water switched")
time_stamp=time.time()
time.sleep(0.1)
if GPIO.input(gpio_btn_steam_switch_sig)== GPIO.LOW:
#GPIO.output(gpio_btn_pump_led, GPIO.LOW) # this is a bit of a hack because the temp control also regulates the LED but putting it here gives better user experience.
logger.debug("Steam Mode OFF")
param.steam_flag = False
#tellHeatProc("auto")
if GPIO.input(gpio_btn_steam_switch_sig) == GPIO.HIGH:
if mem.flag_pwr_on == True:
#GPIO.output(gpio_btn_pump_led, GPIO.HIGH)
logger.debug("Steam Mode ON")
param.steam_flag = True
#mem.lcd_connection.send(["Sorry No", "Steam Mode Yet", 4])
#tellHeatProc("auto")
elif btn == gpio_btn_pwr_switch_sig:
logger.debug("catchButton: power switched")
time_stamp=time.time()
time.sleep(0.1)
if GPIO.input(gpio_btn_pwr_switch_sig)== GPIO.HIGH:
if param.mode == "off":
GPIO.output(gpio_btn_heat_led, GPIO.HIGH) # this is a bit of a hack because the temp control also regulates the LED but putting it here gives better user experience.
logger.debug("catchButton: telling Heat Proc AUTO (ON) ")
tellHeatProc("auto")
mem.flag_pwr_on = True
if GPIO.input(gpio_btn_pwr_switch_sig) == GPIO.LOW:
GPIO.output(gpio_btn_heat_led, GPIO.LOW)
logger.debug("catchButton: telling Heat Proc OFF")
tellHeatProc("off")
mem.flag_pwr_on = False
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
class logdisplay:
def GET(self):
fp = open('/var/log/ispresso.log', 'rU') # reading file from file path
text = fp.read() # read the entire log into memory
fp.close()
return render.logdisplay(text) # render the logdisplay template
def cleanUp():
logger.info("Shutting down...")
mem.lcd_connection.send(["iSPRESSO", "Shutting down", 0])
execfile ('shutdown.py')
if __name__ == '__main__':
try:
logger_init()
os.chdir("/var/www")
call(["modprobe", "w1-gpio"])
call(["modprobe", "w1-therm"])
call(["modprobe", "i2c-dev"])
base_dir = '/sys/bus/w1/devices/'
try:
base_dir = glob.glob(base_dir + '28*')[0]
except:
logger.error("EPIC FAIL! 1-Wire Temp sensor not found in " + base_dir)
mem.one_wire = base_dir + '/w1_slave'
urls = ("/", "ispresso", "/settings", "settings", "/schedule", "schedule", "/advanced", "advanced", "/getstatus", "getstatus",
"/logdisplay", "logdisplay", "/setup", "setup", "/echo", "echo", "/brews/espresso","brewdata")
render = web.template.render("/var/www/templates/")
app = web.application(urls, globals())
atexit.register(cleanUp)
statusQ = Queue(2)
parent_conn, child_conn = Pipe()
lcd_parent_conn, lcd_child_conn = Pipe()
mem.lcd_connection = lcd_parent_conn
initialize()
cloud_parent_conn, cloud_child_conn = Pipe()
mem.cloud_connection = cloud_parent_conn
#brew_parent_conn, brew_child_conn = Pipe()
#mem.brew_connection = brew_parent_conn
brewTimer_parent_conn, brewTimer_child_conn = Pipe()
mem.brewTimer_connection = brewTimer_parent_conn
global_vars = globalvars()
GPIO.add_event_detect(gpio_btn_heat_sig, GPIO.RISING, callback=catchButton, bouncetime=250)
GPIO.add_event_detect(gpio_btn_pump_sig, GPIO.RISING, callback=catchButton, bouncetime=250) # was RISING, at one point HIGH. who knows
GPIO.add_event_detect(gpio_btn_brew_pump_sig, GPIO.BOTH, callback=catchButton, bouncetime=500)
GPIO.add_event_detect(gpio_btn_steam_pump_sig, GPIO.BOTH, callback=catchButton, bouncetime=500)
GPIO.add_event_detect(gpio_btn_steam_switch_sig, GPIO.BOTH, callback=catchButton, bouncetime=500)
GPIO.add_event_detect(gpio_btn_pwr_switch_sig, GPIO.BOTH, callback=catchButton, bouncetime=500)
mem.heat_connection = parent_conn
lcdproc = Process(name="lcdControlProc", target=lcdControlProc, args=(lcd_child_conn,))
lcdproc.start()
#brewproc = Process(name="brewControlProc", target=brewControlProc, args=(brew_child_conn,))
#brewproc.start()
brewTimerproc = Process(name="brewTimerProc", target=brewTimerProc, args=(brewTimer_child_conn,))
brewTimerproc.start()
#cloudproc = Process(name="cloudControlProc", target=cloudControlProc, args=(global_vars, brew_parent_conn,))
#cloudproc.start()
p = Process(name="tempControlProc", target=tempControlProc, args=(global_vars, param.mode, param.cycle_time, param.duty_cycle,
param.set_point, param.set_point_steam, param.k_param, param.i_param, param.d_param, param.brew_k_param, param.brew_i_param, param.brew_d_param,
statusQ, child_conn, param.steam_flag, param.brew_flag))
p.start()
app.add_processor(add_global_hook(parent_conn, statusQ))
app.run()
except KeyboardInterrupt:
cleanUp()
sys.exit()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
cleanUp()
sys.exit()
if mem.scheduler_enabled: # if the program has just been started, set the mode according to the schedule, assuming the schedule is ON
tellHeatProc("auto")
|
sjmalski/ispresso
|
ispresso.py
|
Python
|
mit
| 55,175
|
[
"ESPResSo"
] |
166030bf7e8bc3e950c73f5446d17bcc2d7d90fa0bddd61e172ce4f6aec9c0ec
|
# This program basically works by calling
# the function websiteRate()
# like this --> websiteRate("http://www.example.com")
# or this --> websiteRate(["http://www.example.com","http://www.otherexample.com",...])
# It returns the number of positive words minus the number of negative words
# and the number of keywords like this [30,3]
# a positive first number (like 30) means it's a good website, because more good words were found
# a negative first number indicates the opposite
# 0 means it might be good or bad, depending on context
# and a high number of keywords found is helpful for finding whether
# the website you're scanning is actually relevant or not.
# This program is useful if you want to quickly scan a few articles you've found and
# see whether they are good for your company or not.
# In the future it could be used with a search api so that we can automatically
# check whether a company is doing well or poorly.
import urllib.request, html.parser,re,http.cookiejar
badWords = ["bad","stolen","fraud","criminal","upset","cheat","fake","theft","attack",
"break-in","worse","unfortunate(?:ly)?","regress","unhealthy","sad","accussed",
"terrible","loss(?:es)?","lose","fail(?:ing)?","down","detiorating","fear(?:ing)?",
"scary","unlucky","unfavorable","unstable","disabling","crippling",
"cripples?","unstatisfactory","unpopular","losing","dwindling","dissolve",
"dissolving","bankrupt","uncertain(?:ty)","downward","falling","lower","lowering",
"sick","less","lessening","loosening","struggle","struggling","limp(ing)*",
"lost","poor","disadvantage","lagging","behind","lackluster","crumbl(ing)*",
"falling apart","scared","intimidated","worried","worrying","threatening",
"threats?","dark","endure","terror","terrorist","hack(?:er)?","crude","cancel","withhold","end"]
goodWords = ["good","boost(?:ed)?","improves?","healthy?","gain(?:ing|ed)?","win(?:ning)?","happy","fortunate(?:ly)?","lucky",
"succeed","success","improving","favorable","secure","stable","approve",
"exciting","beneficial","thriving","thrive","terrific","great","wonderful","super",
"productive","popular(?:ity)?","incredible","novel","stunning","safe",
"quality","high","raise","rise+n*","promising","excellent","extraordinary",
"cutting edge","innovative","positive","advantage","forward","ahead",
"progress","of tomorrow","bright"]
imWords = ["shock(?:ing)*","big","important","chang[e|(?:ing)]","sell","buy","false","crazy","money","opportunity","profit"]
for i in range(len(badWords)): badWords[i] = "[ \"\'\.]"+ badWords[i] + "[ \.\?,\"\']" #\
for i in range(len(goodWords)): goodWords[i] = "[ \"\'\.]"+ goodWords[i] + "[ \.\?,\"\']" #| adjust words for regular expressions
for i in range(len(imWords)): imWords[i] = "[ \"\'\.]"+ imWords[i] + "[ \.\?,\"\']"
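# After this wrapping, a pattern like "good" becomes "[ \"\'\.]good[ \.\?,\"\']",
# which matches ' good.' or '"good,' but not 'goods' (a crude word-boundary check).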
#######################
# #
# MAIN FUNCTION #
# #
# Modes: #
# 0 - Good/Bad #
# 1 - Company Search #
# 2 - Important Info #
# #
# #
#######################
def websiteRate(urls,mode=0,extrainfo = ""):
if type(urls) != type(["list"]): urls = [ urls ] # in case you just want to look at one website
class customParser(html.parser.HTMLParser): #parser for html
def handle_data(self, data):
pageData.append(data) # just get data, tags not needed
returnList = []
for url in urls: # for each url
print(url)
try:
pageData = [] #\
foundBWords = [] #|
foundGWords = [] #|
foundIWords = [] #| Reset everything
numbers = [] #/
cj = http.cookiejar.CookieJar() #Holder for cookies, needed for websites like nytimes
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
request = urllib.request.Request(url) # get the webpage
webpage = opener.open(request)
webpage = str(webpage.read()) # read it into a string
newParser = customParser() # create parser
newParser.feed(webpage) # parse it
rawData = " ".join(pageData) # turning results into a string
if mode == 0:
for word in badWords: foundBWords += re.findall(word,rawData)
for word in goodWords: foundGWords += re.findall(word,rawData)
numbers = re.findall("[A-Z]?[a-z]+ \$?[0-9]+,?(?:[0-9]|,)*[0-9]*%? [a-z]+",rawData) # find numbers of interest
returnList += [foundBWords,foundGWords,numbers]
if len(foundBWords) > len(foundGWords): print("BAD")
else: print("GOOD")
elif mode == 1:
companies = extrainfo.split(",")
found = []
for company in companies: found += re.findall("[\.\, ]"+company+"[\.\,\ ]",rawData)
returnList = len(found)
elif mode == 2:
for word in imWords: foundIWords += re.findall(word,rawData)
numbers = re.findall("[A-Z]?[a-z]+ \$?[0-9]+,?(?:[0-9]|,)*[0-9]*%? [a-z]+",rawData) # find numbers of interest
returnList = foundIWords + numbers
else: print("Mode",mode,"not defined.")
except urllib.request.HTTPError as error:
print("\n\n\nThere was an HTTP error:", error.code,error.reason, "with the url",url)
return returnList
#input neutral words
# create library
#words in title
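# A minimal usage sketch (hedged: this performs a live HTTP request, and
# example.com is just a placeholder URL):
if __name__ == "__main__":
    # mode 0 returns [foundBadWords, foundGoodWords, numbersOfInterest] per URL
    print(websiteRate("http://www.example.com"))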
|
tiw51/DeepPurple
|
Stock_Programs/websiteRater.py
|
Python
|
apache-2.0
| 5,768
|
[
"exciting"
] |
df7066fe830f40a30e7cacc676db609b6236f5e5d31b43c653351733a825b42a
|
from __future__ import division
import numpy as np
from .kernels import KERNELS
from .utils import nfft_matrix, fourier_sum, inv_fourier_sum
def ndft(x, f_hat):
"""Compute the non-equispaced direct Fourier transform
f_j = \sum_{-N/2 \le k < N/2} \hat{f}_k \exp(-2 \pi i k x_j)
Parameters
----------
x : array_like, shape=(M,)
The locations of the data points.
f_hat : array_like, shape=(N,)
The amplitudes at each wave number k = range(-N/2, N/2)
Returns
-------
f : ndarray, shape=(M,)
The direct Fourier summation corresponding to x
See Also
--------
nfft : non-equispaced fast Fourier transform
ndft_adjoint : adjoint non-equispaced direct Fourier transform
nfft_adjoint : adjoint non-equispaced fast Fourier transform
"""
x, f_hat = map(np.asarray, (x, f_hat))
assert x.ndim == 1
assert f_hat.ndim == 1
N = len(f_hat)
assert N % 2 == 0
k = -(N // 2) + np.arange(N)
return np.dot(f_hat, np.exp(-2j * np.pi * x * k[:, None]))
def ndft_adjoint(x, f, N):
"""Compute the adjoint non-equispaced direct Fourier transform
\hat{f}_k = \sum_{0 \le j < N} f_j \exp(2 \pi i k x_j)
where k = range(-N/2, N/2)
Parameters
----------
x : array_like, shape=(M,)
The locations of the data points.
f : array_like, shape=(M,)
The amplitudes at each location x
N : int
The number of frequencies at which to evaluate the result
Returns
-------
f_hat : ndarray, shape=(N,)
The amplitudes corresponding to each wave number k = range(-N/2, N/2)
See Also
--------
nfft_adjoint : adjoint non-equispaced fast Fourier transform
ndft : non-equispaced direct Fourier transform
nfft : non-equispaced fast Fourier transform
"""
x, f = np.broadcast_arrays(x, f)
assert x.ndim == 1
N = int(N)
assert N % 2 == 0
k = -(N // 2) + np.arange(N)
return np.dot(f, np.exp(2j * np.pi * k * x[:, None]))
def nfft(x, f_hat, sigma=3, tol=1E-8, m=None, kernel='gaussian',
use_fft=True, truncated=True):
"""Compute the non-equispaced fast Fourier transform
f_j = \sum_{-N/2 \le k < N/2} \hat{f}_k \exp(-2 \pi i k x_j)
Parameters
----------
x : array_like, shape=(M,)
The locations of the data points. Each value in x should lie
in the range [-1/2, 1/2).
f_hat : array_like, shape=(N,)
The amplitudes at each wave number k = range(-N/2, N/2).
sigma : int (optional, default=3)
The oversampling factor for the FFT gridding.
tol : float (optional, default=1E-8)
The desired tolerance of the truncation approximation.
m : int (optional)
The half-width of the truncated window. If not specified, ``m`` will
be estimated based on ``tol``.
kernel : string or NFFTKernel (optional, default='gaussian')
The desired convolution kernel for the calculation.
use_fft : bool (optional, default=True)
If True, use the FFT rather than DFT for fast computation.
truncated : bool (optional, default=True)
If True, use a fast truncated approximate summation matrix.
If False, use a slow full summation matrix.
Returns
-------
f : ndarray, shape=(M,)
The approximate Fourier summation evaluated at points x
See Also
--------
ndft : non-equispaced direct Fourier transform
nfft_adjoint : adjoint non-equispaced fast Fourier transform
ndft_adjoint : adjoint non-equispaced direct Fourier transform
"""
# Validate inputs
x, f_hat = map(np.asarray, (x, f_hat))
assert x.ndim == 1
assert f_hat.ndim == 1
N = len(f_hat)
assert N % 2 == 0
sigma = int(sigma)
assert sigma >= 2
n = N * sigma
kernel = KERNELS.get(kernel, kernel)
if m is None:
m = kernel.estimate_m(tol, N, sigma)
m = int(m)
assert m <= n // 2
k = -(N // 2) + np.arange(N)
# Compute the NFFT
ghat = f_hat / kernel.phi_hat(k, n, m, sigma) / n
g = fourier_sum(ghat, N, n, use_fft=use_fft)
mat = nfft_matrix(x, n, m, sigma, kernel, truncated=truncated)
f = mat.dot(g)
return f
def nfft_adjoint(x, f, N, sigma=3, tol=1E-8, m=None, kernel='gaussian',
use_fft=True, truncated=True):
"""Compute the adjoint non-equispaced fast Fourier transform
\hat{f}_k = \sum_{0 \le j < N} f_j \exp(2 \pi i k x_j)
where k = range(-N/2, N/2)
Parameters
----------
x : array_like, shape=(M,)
The locations of the data points.
f : array_like, shape=(M,)
The amplitudes at each location x
N : int
The number of frequencies at which to evaluate the result
sigma : int (optional, default=3)
The oversampling factor for the FFT gridding.
tol : float (optional, default=1E-8)
The desired tolerance of the truncation approximation.
m : int (optional)
The half-width of the truncated window. If not specified, ``m`` will
be estimated based on ``tol``.
kernel : string or NFFTKernel (optional, default='gaussian')
The desired convolution kernel for the calculation.
use_fft : bool (optional, default=True)
If True, use the FFT rather than DFT for fast computation.
truncated : bool (optional, default=True)
If True, use a fast truncated approximate summation matrix.
If False, use a slow full summation matrix.
Returns
-------
f_hat : ndarray, shape=(N,)
The approximate amplitudes corresponding to each wave number
k = range(-N/2, N/2)
See Also
--------
ndft_adjoint : adjoint non-equispaced direct Fourier transform
nfft : non-equispaced fast Fourier transform
ndft : non-equispaced direct Fourier transform
"""
# Validate inputs
x, f = np.broadcast_arrays(x, f)
assert x.ndim == 1
N = int(N)
assert N % 2 == 0
sigma = int(sigma)
assert sigma >= 2
n = N * sigma
kernel = KERNELS.get(kernel, kernel)
if m is None:
m = kernel.estimate_m(tol, N, sigma)
m = int(m)
assert m <= n // 2
k = -(N // 2) + np.arange(N)
# Compute the adjoint NFFT
mat = nfft_matrix(x, n, m, sigma, kernel, truncated=truncated)
g = mat.T.dot(f)
ghat = inv_fourier_sum(g, N, n, use_fft=use_fft)
fhat = ghat / kernel.phi_hat(k, n, m, sigma) / n
return fhat
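# A quick self-check sketch (an assumption, not part of the library: run it in
# package context, e.g. `python -m nfft.core`, since this module uses relative
# imports). The adjoint identity <ndft(x, f_hat), g> == <f_hat, ndft_adjoint(x, g, N)>
# should hold exactly for the direct transforms:
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    N = 16
    x = rng.uniform(-0.5, 0.5, 20)
    f_hat = rng.randn(N) + 1j * rng.randn(N)
    g = rng.randn(len(x)) + 1j * rng.randn(len(x))
    lhs = np.vdot(ndft(x, f_hat), g)
    rhs = np.vdot(f_hat, ndft_adjoint(x, g, N))
    print('adjoint identity holds:', np.allclose(lhs, rhs))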
|
jakevdp/nfft
|
nfft/core.py
|
Python
|
mit
| 6,446
|
[
"Gaussian"
] |
d90a96e67c439e6d525bb954d9a302cf57d780dee4300424cda59d6c0e532ad2
|
import random
import matplotlib.pyplot as plt
import numpy
''' 3_nn.py
simple neural net with 2 neurons
'''
''' -has primitive backprop
-weights deviate in opposite direction of error
-productivity is determined by difference between forward output and target
'''
'''TODO:
-one function to forward through all gates
-one function to update the weight of any gate
-one function to get the error derivative of any gate
-one function to forward through any gate
-one function to backward through all gates
'''
# forward propagation
def forwardGate( inp, weight ):
return ( inp * weight )
# forward entire circuit
def forwardAll( ):
    pass # TODO: forward through all gates (see TODO list above)
# calculate derivative
def calcDerivative( inp, weight, h ):
output_0 = forwardGate( inp , weight )
output_1 = forwardGate( inp , weight + h )
derivative = ( output_1 - output_0 ) / h
return derivative
# calculate derivative on error
def calcErrorDerivative( inp, weight, h, target ):
output_0 = forwardGate( inp , weight )
error_0 = target - output_0
output_1 = forwardGate( inp , weight + h )
error_1 = target - output_1
derivative = ( error_1 - error_0 ) / h
return derivative
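# sanity check for the finite difference above: output = inp * weight, so
# error = target - inp * weight and d(error)/d(weight) = -inp analytically;
# calcErrorDerivative should approach -inp as h shrinks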
# outputs a random number between -1.0 and 1.0
def randy():
return ( random.random() * 2.0 ) - 1.0
# returns sign of a number
def sign( a ):
if a >= 0:
return 1
if a < 0:
return -1
''' goal is to make output equal to a target
-currently no complex backprop as only one neuron present
-not sure if derivative is even right
-doesn't like negative targets
|-suspect this has to do with an incorrect sign somewhere
'''
# - - - - - SETTINGS - - - - - #
target = random.random() * 100.0
tries = 100
stepSize = 0.1
h = 0.0001
inp = 2.0
weight_0 = random.random()
weight_1 = random.random()
inputs = [ 2.0 ]
weights = [ weight_0,
weight_1 ]
connections = [ None,
0 ]
input_sockets = [ 0,
None ]
# - - - - - PREP - - - - - #
output = 0.0
error = 0.0
weight = weight_0 # the loop below trains a single weight; weight_1 is unused scaffolding for now
output = forwardGate( inp, weight )
error = target - output
# save initial value
initialWeight = weight
initialOutput = output
initialError = error
# - - - - - PREP NUMPY - - - - - #
xAxisValues = range( 0, tries + 1 )
yAxisValues = [ initialError ]
# - - - - - MAIN - - - - - #
for i in range( 0, tries ):
# calculate error
output = forwardGate( inp, weight )
error = target - output
# calculate derivative
wDir = calcErrorDerivative( inp, weight, h, target )
# DEBUG
#print("wDir:\t" + "%.2f" % wDir + "\tError:\t" + "%.2f" % error )
# tweak x and y in direction of derivative
weight = weight + ( error / -wDir ) * stepSize
# STATS:
## add current best error to yAxisValues for graphing
yAxisValues.append( error )
# - - - - - DATA COLLECTING - - - - - #
# save final value
finalWeight = weight
output = forwardGate( inp, finalWeight )
error = target - output
finalOutput = output
finalError = error
# - - - - - RESULTS - - - - - #
print( "# - - - RESULTS - - - #" )
# output state before changes
print( "weight was: \t" + str( initialWeight ) )
print( "output was: \t" + str( initialOutput ) )
print( "target was: \t" + str( target ) )
print( "error was: \t" + str( initialError ) )
print("")
# output state after changes
print( "weight is: \t" + str( finalWeight ) )
print( "output is: \t" + str( finalOutput ) )
print( "target is: \t" + str( target ) )
print( "error is: \t" + str( finalError ) )
# - - - - - PLOT - - - - - #
plt.plot( xAxisValues, yAxisValues )
plt.xlabel( 'tries' )
plt.ylabel( 'error' )
plt.title( 'Error Over Tries' )
plt.grid( True )
plt.show()
|
wegfawefgawefg/NeuralNetDemos
|
3_nn.py
|
Python
|
gpl-3.0
| 3,572
|
[
"NEURON"
] |
61d53c689a4b69b238b0a5c9c73ec623208135b3e3d2300fc021543e83bf9e32
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
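# For example (hypothetical values):
#   partition(lambda n: n % 2 == 0, [1, 2, 3, 4]) -> ([2, 4], [1, 3])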
class iapCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str, ...] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str, ...]] = {
'create_brand': ('parent', 'brand', ),
'create_identity_aware_proxy_client': ('parent', 'identity_aware_proxy_client', ),
'delete_identity_aware_proxy_client': ('name', ),
'get_brand': ('name', ),
'get_iam_policy': ('resource', 'options', ),
'get_iap_settings': ('name', ),
'get_identity_aware_proxy_client': ('name', ),
'list_brands': ('parent', ),
'list_identity_aware_proxy_clients': ('parent', 'page_size', 'page_token', ),
'reset_identity_aware_proxy_client_secret': ('name', ),
'set_iam_policy': ('resource', 'policy', ),
'test_iam_permissions': ('resource', 'permissions', ),
'update_iap_settings': ('iap_settings', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
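# Illustration of the rewrite leave_Call performs (hypothetical client call,
# using the 'get_brand' entry from METHOD_TO_PARAMS above):
#   client.get_brand('projects/p/brands/b', retry=my_retry)
# becomes:
#   client.get_brand(request={'name': 'projects/p/brands/b'}, retry=my_retry)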
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=iapCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the iap client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
|
googleapis/python-iap
|
scripts/fixup_iap_v1_keywords.py
|
Python
|
apache-2.0
| 6,634
|
[
"VisIt"
] |
bd0e2cefb763a1c4bf8094f5b5830d7ca7bf3cf24de42a70915b9328f5105cd5
|
#!/usr/bin/python
"""create simulated data for testing 2d model fit
"""
import matplotlib as mpl
# we do this because sometimes we run this without an X-server, and this backend doesn't need
# one. We set warn=False because the notebook uses a different backend and will spout out a big
# warning to that effect; that's unnecessarily alarming, so we hide it.
mpl.use('svg', warn=False)
import argparse
import pandas as pd
import numpy as np
from . import stimuli as sfp_stimuli
from . import model as sfp_model
def quadratic_mean(x):
"""returns the quadratic mean: sqrt{(x_1^2 + ... + x_n^2) / n}
"""
return np.sqrt(np.mean(x**2))
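# e.g. quadratic_mean(np.array([3., 4.])) == np.sqrt((9 + 16) / 2), about 3.536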
def calculate_error_distribution(first_level_df):
"""given a first_level_df, return the distribution of errors across voxels
this requires the first_level_df to contain the column amplitude_estimate_std_error_normed
(i.e., it's the summary df, not the full df, which contains each bootstrap).
we take the quadratic mean of each voxel's 48 normed standard errors (that's the appropriate
way to combine these errors) and then return an array containing one error per voxel. this
should be sampled to determine the noise level for individual simulated voxels
"""
errors = first_level_df.groupby(['varea', 'voxel']).amplitude_estimate_std_error_normed
return errors.apply(quadratic_mean).values
def simulate_voxel(true_model, freqs, noise_level=0, ecc_range=(1, 12),
angle_range=(0, 2*np.pi), pix_diam=1080, deg_diam=24,
vox_ecc=None, vox_angle=None):
"""simulate a single voxel
noise_level should be a float. to add noise, we normalize our predictions (to have an L2 of 1),
add noise from a normal distribution with a mean of 0 and a standard deviation of
`noise_level`, and un-normalize our predictions (by multiplying by its original L2 norm).
"""
if vox_ecc is None:
vox_ecc = np.random.uniform(*ecc_range)
if vox_angle is None:
vox_angle = np.random.uniform(*angle_range)
mags, direcs = [], []
for w_r, w_a in freqs:
_, _, m, d = sfp_stimuli.sf_cpd(pix_diam, deg_diam, vox_ecc, vox_angle, w_r=w_r, w_a=w_a)
mags.append(m)
direcs.append(d)
resps = true_model.evaluate(mags, direcs, vox_ecc, vox_angle)
resps = resps.detach().numpy()
resps_norm = np.linalg.norm(resps, 2)
normed_resps = resps / resps_norm
# this means that the noise_level argument controls the size of the error
# in the normed responses (not the un-normed ones)
normed_resps += np.random.normal(scale=noise_level, size=len(resps))
# since the noise_level becomes the standard deviation of a normal distribution, the precision
# is the reciprocal of its square
if noise_level != 0:
precision = 1. / ((noise_level * resps_norm)**2)
else:
# in this case, just set the precision to 1, so it's the same for all of them. only the
# relative precision matters anyway; if they're all identical it doesn't matter what the
# value is.
precision = 1.
return pd.DataFrame({'eccen': vox_ecc, 'angle': vox_angle, 'local_sf_magnitude': mags,
'local_sf_xy_direction': direcs,
'amplitude_estimate_median': normed_resps * resps_norm,
'amplitude_estimate_std_error': noise_level * resps_norm,
'true_amplitude_estimate_median': resps,
'amplitude_estimate_median_normed': normed_resps,
'amplitude_estimate_std_error_normed': noise_level,
'amplitude_estimate_norm': resps_norm,
'precision': precision,
'stimulus_class': range(len(freqs))})
def simulate_data(true_model, num_voxels=100, noise_level=0, num_bootstraps=10,
noise_source_path=None):
"""simulate a bunch of voxels
if noise_source_path is None, then all voxels have the same noise, which is drawn from a
Gaussian with mean 0 and standard deviation `noise_level` (after normalization to having an L2
norm of 1). if first_level_df is not None, then we grab the error distribution of voxels found
in it (see `calculate_error_distribution`), multiply those values by noise_level, and sample
once per voxel
"""
freqs = sfp_stimuli._gen_freqs([2**i for i in np.arange(2.5, 7.5, .5)], True)
if noise_source_path is not None:
noise_source_df = pd.read_csv(noise_source_path)
noise_distribution = noise_level * calculate_error_distribution(noise_source_df)
else:
noise_distribution = [noise_level]
df = []
for i in range(num_voxels):
vox_ecc = np.random.uniform(1, 12)
vox_angle = np.random.uniform(0, 2*np.pi)
noise_level = np.random.choice(noise_distribution)
for j in range(num_bootstraps):
tmp = simulate_voxel(true_model, freqs, noise_level=noise_level,
vox_ecc=vox_ecc, vox_angle=vox_angle)
tmp['bootstrap_num'] = j
tmp['voxel'] = i
df.append(tmp)
df = pd.concat(df)
df['varea'] = 1
# we want the generating model and its parameters stored here
df['true_model_type'] = true_model.model_type
for name, val in true_model.named_parameters():
df['true_model_%s' % name] = val.detach().numpy()
df['noise_level'] = noise_level
df['noise_source_df'] = noise_source_path
return df
def main(model_period_orientation_type='iso', model_eccentricity_type='full',
model_amplitude_orientation_type='iso', sigma=.4, sf_ecc_slope=1, sf_ecc_intercept=0,
abs_mode_cardinals=0, abs_mode_obliques=0, rel_mode_cardinals=0, rel_mode_obliques=0,
abs_amplitude_cardinals=0, abs_amplitude_obliques=0, rel_amplitude_cardinals=0,
rel_amplitude_obliques=0, num_voxels=100, noise_level=0, save_path=None,
noise_source_path=None, num_bootstraps=100):
"""Simulate first level data to be fit with 2d tuning model.
Note that when calling the function, you can set every parameter
individually, but, depending on the values of the
model_period_orientation_type, model_eccentricity_type, and
model_amplitude_orientation_type, some of them will be set to specific
values (often 0). If this happens, a warning will be raised.
if save_path is not None, should be a list with one or two strs, first
giving the path to save the summary dataframe (median across bootstraps),
second the path to save the full dataframe (all bootstraps). If only one
str, we only save the summary version.
"""
model = sfp_model.LogGaussianDonut(model_period_orientation_type, model_eccentricity_type,
model_amplitude_orientation_type, sigma, sf_ecc_slope,
sf_ecc_intercept, abs_mode_cardinals, abs_mode_obliques,
rel_mode_cardinals, rel_mode_obliques,
abs_amplitude_cardinals, abs_amplitude_obliques,
rel_amplitude_cardinals, rel_amplitude_obliques)
model.eval()
df = simulate_data(model, num_voxels, noise_level, num_bootstraps, noise_source_path)
df['period_orientation_type'] = model_period_orientation_type
df['eccentricity_type'] = model_eccentricity_type
df['amplitude_orientation_type'] = model_amplitude_orientation_type
if save_path is not None:
# summary dataframe
df.groupby(['voxel', 'stimulus_class']).median().to_csv(save_path[0],
index=False)
# full dataframe
col_renamer = {c: c.replace('_median', '') for c in df.columns
if 'median' in c}
df.rename(columns=col_renamer).to_csv(save_path[1], index=False)
return df
if __name__ == '__main__':
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter):
pass
parser = argparse.ArgumentParser(
description=("Simulate first level data to be fit with 2d tuning model. Note that when "
"calling the function, you can set every parameter individually, but, "
"depending on the values of the model_orientation_type, model_eccentricity_"
"type, and model_vary_amplitude, some of them have specific values (often 0),"
"they will be set to. If this happens, a warning will be raised."),
formatter_class=CustomFormatter)
parser.add_argument("save_path", nargs='+',
help=("Path (should end in .csv) where we'll save the simulated data. If "
"one str, we only save the summary version, if two we save both "
"summary and full."))
parser.add_argument("--model_period_orientation_type", '-p', default='iso',
help=("{iso, absolute, relative, full}\nEffect of orientation on "
"preferred period\n- iso: model is isotropic, "
"predictions identical for all orientations.\n- absolute: model can"
" fit differences in absolute orientation, that is, in Cartesian "
"coordinates, such that sf_angle=0 correponds to 'to the right'\n- "
"relative: model can fit differences in relative orientation, that "
"is, in retinal polar coordinates, such that sf_angle=0 corresponds"
" to 'away from the fovea'\n- full: model can fit differences in "
"both absolute and relative orientations"))
parser.add_argument("--model_eccentricity_type", '-e', default='full',
help=("{scaling, constant, full}\n- scaling: model's relationship between"
" preferred period and eccentricity is exactly scaling, that is, the"
" preferred period is equal to the eccentricity.\n- constant: model'"
"s relationship between preferred period and eccentricity is exactly"
" constant, that is, it does not change with eccentricity but is "
"flat.\n- full: model discovers the relationship between "
"eccentricity and preferred period, though it is constrained to be"
" linear (i.e., model solves for a and b in period = a * "
"eccentricity + b)"))
parser.add_argument("--model_amplitude_orientation_type", '-o', default='iso',
help=("{iso, absolute, relative, full}\nEffect of orientation on "
"max_amplitude\n- iso: model is isotropic, "
"predictions identical for all orientations.\n- absolute: model can"
" fit differences in absolute orientation, that is, in Cartesian "
"coordinates, such that sf_angle=0 correponds to 'to the right'\n- "
"relative: model can fit differences in relative orientation, that "
"is, in retinal polar coordinates, such that sf_angle=0 corresponds"
" to 'away from the fovea'\n- full: model can fit differences in "
"both absolute and relative orientations"))
parser.add_argument("--num_voxels", '-n', default=100, help="Number of voxels to simulate",
type=int)
parser.add_argument("--num_bootstraps", default=100, help="Number of bootstraps per voxel",
type=int)
parser.add_argument("--sigma", '-s', default=.4, type=float, help="Sigma of log-Normal donut")
parser.add_argument("--sf_ecc_slope", '-a', default=1, type=float,
help=("Slope of relationship between tuning and eccentricity for log-"
"Normal donut"))
parser.add_argument("--sf_ecc_intercept", '-b', default=0, type=float,
help=("Intercept of relationship between tuning and eccentricity for "
"log-Normal donut"))
parser.add_argument("--rel_mode_cardinals", "-rmc", default=0, type=float,
help=("The strength of the cardinal-effect of the relative orientation (so"
" angle=0 corresponds to away from the fovea) on the mode. That is, "
"the coefficient of cos(2*relative_orientation)"))
parser.add_argument("--rel_mode_obliques", "-rmo", default=0, type=float,
help=("The strength of the oblique-effect of the relative orientation (so"
" angle=0 corresponds to away from the fovea) on the mode. That is, "
"the coefficient of cos(4*relative_orientation)"))
parser.add_argument("--rel_amplitude_cardinals", "-rac", default=0, type=float,
help=("The strength of the cardinal-effect of the relative orientation (so"
" angle=0 corresponds to away from the fovea) on the amplitude. That"
" is, the coefficient of cos(2*relative_orientation)"))
parser.add_argument("--rel_amplitude_obliques", "-rao", default=0, type=float,
help=("The strength of the oblique-effect of the relative orientation (so"
" angle=0 corresponds to away from the fovea) on the amplitude. That"
" is, the coefficient of cos(4*relative_orientation)"))
parser.add_argument("--abs_mode_cardinals", "-amc", default=0, type=float,
help=("The strength of the cardinal-effect of the absolute orientation (so"
" angle=0 corresponds to the right) on the mode. That is, "
"the coefficient of cos(2*absolute_orientation)"))
parser.add_argument("--abs_mode_obliques", "-amo", default=0, type=float,
help=("The strength of the oblique-effect of the absolute orientation (so"
" angle=0 corresponds to the right) on the mode. That is, "
"the coefficient of cos(4*absolute_orientation)"))
parser.add_argument("--abs_amplitude_cardinals", "-aac", default=0, type=float,
help=("The strength of the cardinal-effect of the absolute orientation (so"
" angle=0 corresponds to the right) on the amplitude. That"
" is, the coefficient of cos(2*absolute_orientation)"))
parser.add_argument("--abs_amplitude_obliques", "-aao", default=0, type=float,
help=("The strength of the oblique-effect of the absolute orientation (so"
" angle=0 corresponds to the right) on the amplitude. That"
" is, the coefficient of cos(4*absolute_orientation)"))
parser.add_argument('--noise_source_path', default=None,
help=("None or path to a first level summary dataframe. If None, then all "
"simulated voxels have the same noise, determined by `noise_level` "
"argment. If a path, then we find calculate the error distribution"
" based on that dataframe (see `calculate_error_distribution` "
"function for details) and each voxel's noise level is sampled "
"independently from that distribution."))
parser.add_argument("--noise_level", '-l', default=0, type=float,
help=("Noise level. If noise_source_path is None, this is the std dev of a"
" normal distribution with mean 0, which will be added to the "
"simulated data. If "
"noise_source_path is not None, then we multiply the noise "
"distribution obtained from that dataframe by this number (see that"
" variable's help for more details). In both cases, the simulated "
"responses are normalized to have an L2 norm of 1 before noise is"
" added, so this should be interpreted as relative to a unit vector"
". In both cases, a value of 0 means no noise."))
args = vars(parser.parse_args())
main(**args)
|
billbrod/spatial-frequency-preferences
|
sfp/simulate_data.py
|
Python
|
mit
| 16,900
|
[
"Gaussian"
] |
c4b7e53e6ff398e9fdfdc786f07adce8ce40767bad413d2810cbd03aad5a357f
|
"""
This module contains classes for walking abstract syntax trees and transforming
them.
"""
from .node import Node
class NodeVisitor(object):
"""
Walks the abstract syntax tree, calling the visitor function for every
subtree found.
"""
def __init__(self):
self._visitor_cache = {}
def get_visitor(self, node):
node_class = node.__class__
visitor = self._visitor_cache.get(node_class)
if visitor is None:
method = 'visit_%s' % node_class.__name__
visitor = getattr(self, method, self.generic_visit)
self._visitor_cache[node_class] = visitor
return visitor
def visit(self, node):
visitor = self.get_visitor(node)
return visitor(node)
def generic_visit(self, node):
if isinstance(node, Node):
self.visit_node(node)
def visit_node(self, node):
for child in node.iter_children():
self.visit(child)
def visit_list(self, node):
for element in node:
self.visit(element)
class NodeTransformer(NodeVisitor):
"""
Walks the abstract syntax tree and allows modification of nodes.
This will walk the AST and use the return value of the visitors to replace
or remove the old nodes. If the return value of the visitor method is
``None``, the node will be removed from its location, otherwise it will
be replaced with the return value. If the return value is the original
node, no replacement takes place.
"""
def generic_visit(self, node):
if isinstance(node, Node):
return self.visit_node(node)
return node
def visit_node(self, node):
for field, old_value in node.iter_fields():
new_value = self.visit(old_value)
setattr(node, field, new_value)
return node
def visit_list(self, node):
new_values = []
for element in node:
new_value = self.visit(element)
if new_value is None:
continue
elif isinstance(new_value, list):
new_values.extend(new_value)
else:
new_values.append(new_value)
return new_values
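# A minimal sketch of how NodeTransformer is meant to be subclassed (the node
# class name here is hypothetical; real node classes come from the parser):
class DropDebuggerStatements(NodeTransformer):
    """Returning None from a visitor removes that node from its parent."""
    def visit_DebuggerStatement(self, node):
        return None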
|
jeffkistler/BigRig
|
bigrig/parser/visitor.py
|
Python
|
bsd-3-clause
| 2,223
|
[
"VisIt"
] |
d2bf0efab32814b4c36fde4ada330bdd07bd8c97064d0b98eb4db511c959ee6c
|
#!/usr/bin/env python
import os
import subprocess
import sys
from distutils.command.build import build
from distutils.command.sdist import sdist
from distutils.errors import DistutilsExecError
from distutils.version import StrictVersion
from setuptools.command.build_ext import build_ext
from setuptools.command.install import install
from setuptools import setup, Extension
__author__ = 'Noel O\'Boyle'
__email__ = 'openbabel-discuss@lists.sourceforge.net'
__version__ = '1.8.2'
__license__ = 'GPL'
if os.path.exists('README.rst'):
long_description = open('README.rst').read()
else:
long_description = '''
The Open Babel package provides a Python wrapper to the Open Babel C++
chemistry library. Open Babel is a chemical toolbox designed to speak
the many languages of chemical data. It's an open, collaborative
project allowing anyone to search, convert, analyze, or store data from
molecular modeling, chemistry, solid-state materials, biochemistry, or
related areas. It provides a broad base of chemical functionality for
custom development.
'''
class PkgConfigError(Exception):
pass
def pkgconfig(package, option):
"""Wrapper around pkg-config command line tool."""
try:
p = subprocess.Popen(['pkg-config', option, package],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = p.communicate()
if stderr:
raise PkgConfigError('package %s could not be found by pkg-config' % package)
return stdout.strip()
except OSError:
raise PkgConfigError('pkg-config could not be found')
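# e.g. pkgconfig('openbabel-2.0', '--modversion') shells out to
#   pkg-config --modversion openbabel-2.0
# and returns its stripped stdout, raising PkgConfigError on any failure.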
def locate_ob():
"""Try use pkgconfig to locate Open Babel, otherwise guess default location."""
try:
version = pkgconfig('openbabel-2.0', '--modversion')
if not StrictVersion(version) >= StrictVersion('2.3.0'):
print('Warning: Open Babel 2.3.0 or later is required. Your version (%s) may not be compatible.' % version)
include_dirs = pkgconfig('openbabel-2.0', '--variable=pkgincludedir')
library_dirs = pkgconfig('openbabel-2.0', '--variable=libdir')
print('Open Babel location automatically determined by pkg-config:')
except PkgConfigError as e:
print('Warning: %s.\nGuessing Open Babel location:' % e)
include_dirs = '/usr/local/include/openbabel-2.0'
library_dirs = '/usr/local/lib'
return include_dirs, library_dirs
class CustomBuild(build):
"""Ensure build_ext runs first in build command."""
def run(self):
self.run_command('build_ext')
build.run(self)
class CustomInstall(install):
"""Ensure build_ext runs first in install command."""
def run(self):
self.run_command('build_ext')
self.do_egg_install()
class CustomSdist(sdist):
"""Add swig interface files into distribution from parent directory."""
def make_release_tree(self, base_dir, files):
sdist.make_release_tree(self, base_dir, files)
link = 'hard' if hasattr(os, 'link') else None
self.copy_file('../stereo.i', base_dir, link=link)
self.copy_file('../openbabel-python.i', base_dir, link=link)
class CustomBuildExt(build_ext):
"""Custom build_ext to set SWIG options and print a better error message."""
def finalize_options(self):
# Setting include_dirs, library_dirs, swig_opts here instead of in Extension constructor allows them to be
# overridden using -I and -L command line options to python setup.py build_ext.
if not self.include_dirs and not self.library_dirs:
self.include_dirs, self.library_dirs = locate_ob()
else:
print('Open Babel location manually specified:')
print('- include_dirs: %s\n- library_dirs: %s' % (self.include_dirs, self.library_dirs))
build_ext.finalize_options(self)
self.swig_opts = ['-c++', '-small', '-O', '-templatereduce', '-naturalvar']
self.swig_opts += ['-I%s' % i for i in self.include_dirs]
def swig_sources(self, sources, extension):
try:
return build_ext.swig_sources(self, sources, extension)
except DistutilsExecError:
print('\nError: SWIG failed. Is Open Babel installed?\n'
'You may need to manually specify the location of Open Babel include and library directories. '
'For example:\n'
' python setup.py build_ext -I/usr/local/include/openbabel-2.0 -L/usr/local/lib\n'
' python setup.py install')
sys.exit(1)
obextension = Extension('_openbabel', ['openbabel-python.i'], libraries=['openbabel'])
setup(name='openbabel',
version=__version__,
author=__author__,
author_email=__email__,
license=__license__,
url='http://openbabel.org/',
description='Python interface to the Open Babel chemistry library',
long_description=long_description,
zip_safe=True,
cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt, 'install': CustomInstall, 'sdist': CustomSdist},
py_modules=['openbabel', 'pybel'],
ext_modules=[obextension],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Other Environment',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: OS Independent',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
'Programming Language :: C++',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Software Development :: Libraries'
]
)
|
jnavila/openbabel
|
scripts/python/setup.py
|
Python
|
gpl-2.0
| 6,165
|
[
"Open Babel",
"Pybel"
] |
3d4a529569a3851429684836eed9efa98e112fd66e29bd75b8fb64d9fa6bcd33
|
"""SCons.Util
Various utility functions go here.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Util.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import os
import sys
import copy
import re
import types
from collections import UserDict, UserList, UserString
# Don't "from types import ..." these because we need to get at the
# types module later to look for UnicodeType.
InstanceType = types.InstanceType
MethodType = types.MethodType
FunctionType = types.FunctionType
try: unicode
except NameError: UnicodeType = None
else: UnicodeType = unicode
def dictify(keys, values, result={}):
for k, v in zip(keys, values):
result[k] = v
return result
_altsep = os.altsep
if _altsep is None and sys.platform == 'win32':
# My ActivePython 2.0.1 doesn't set os.altsep! What gives?
_altsep = '/'
if _altsep:
def rightmost_separator(path, sep):
return max(path.rfind(sep), path.rfind(_altsep))
else:
def rightmost_separator(path, sep):
return path.rfind(sep)
# First two from the Python Cookbook, just for completeness.
# (Yeah, yeah, YAGNI...)
def containsAny(str, set):
"""Check whether sequence str contains ANY of the items in set."""
for c in set:
if c in str: return 1
return 0
def containsAll(str, set):
"""Check whether sequence str contains ALL of the items in set."""
for c in set:
if c not in str: return 0
return 1
def containsOnly(str, set):
"""Check whether sequence str contains ONLY items in set."""
for c in str:
if c not in set: return 0
return 1
def splitext(path):
"Same as os.path.splitext() but faster."
sep = rightmost_separator(path, os.sep)
dot = path.rfind('.')
# An ext is only real if it has at least one non-digit char
if dot > sep and not containsOnly(path[dot:], "0123456789."):
return path[:dot],path[dot:]
else:
return path,""
def updrive(path):
"""
Make the drive letter (if any) upper case.
This is useful because Windows is inconsistent on the case
of the drive letter, which can cause inconsistencies when
calculating command signatures.
"""
drive, rest = os.path.splitdrive(path)
if drive:
path = drive.upper() + rest
return path
class NodeList(UserList):
"""This class is almost exactly like a regular list of Nodes
(actually it can hold any object), with one important difference.
If you try to get an attribute from this list, it will return that
attribute from every item in the list. For example:
>>> someList = NodeList([ ' foo ', ' bar ' ])
>>> someList.strip()
[ 'foo', 'bar' ]
"""
def __nonzero__(self):
return len(self.data) != 0
def __str__(self):
return ' '.join(map(str, self.data))
def __iter__(self):
return iter(self.data)
def __call__(self, *args, **kwargs):
result = [x(*args, **kwargs) for x in self.data]
return self.__class__(result)
def __getattr__(self, name):
result = [getattr(x, name) for x in self.data]
return self.__class__(result)
_get_env_var = re.compile(r'^\$([_a-zA-Z]\w*|{[_a-zA-Z]\w*})$')
def get_environment_var(varstr):
"""Given a string, first determine if it looks like a reference
to a single environment variable, like "$FOO" or "${FOO}".
If so, return that variable with no decorations ("FOO").
If not, return None."""
mo=_get_env_var.match(to_String(varstr))
if mo:
var = mo.group(1)
if var[0] == '{':
return var[1:-1]
else:
return var
else:
return None
class DisplayEngine(object):
print_it = True
def __call__(self, text, append_newline=1):
if not self.print_it:
return
if append_newline: text = text + '\n'
try:
sys.stdout.write(unicode(text))
except IOError:
# Stdout might be connected to a pipe that has been closed
# by now. The most likely reason for the pipe being closed
# is that the user has pressed ctrl-c. If this is the case,
# then SCons is currently shutting down. We therefore ignore
# IOError's here so that SCons can continue and shut down
# properly so that the .sconsign is correctly written
# before SCons exits.
pass
def set_mode(self, mode):
self.print_it = mode
def render_tree(root, child_func, prune=0, margin=[0], visited=None):
"""
Render a tree of nodes into an ASCII tree view.
root - the root node of the tree
child_func - the function called to get the children of a node
prune - don't visit the same node twice
margin - the format of the left margin to use for children of root.
1 results in a pipe, and 0 results in no pipe.
visited - a dictionary of visited nodes in the current branch if not prune,
or in the whole tree if prune.
"""
rname = str(root)
# Initialize 'visited' dict, if required
if visited is None:
visited = {}
children = child_func(root)
retval = ""
for pipe in margin[:-1]:
if pipe:
retval = retval + "| "
else:
retval = retval + " "
if rname in visited:
return retval + "+-[" + rname + "]\n"
retval = retval + "+-" + rname + "\n"
if not prune:
visited = copy.copy(visited)
visited[rname] = 1
for i in range(len(children)):
margin.append(i<len(children)-1)
retval = retval + render_tree(children[i], child_func, prune, margin, visited)
margin.pop()
return retval
IDX = lambda N: N and 1 or 0
def print_tree(root, child_func, prune=0, showtags=0, margin=[0], visited=None):
"""
Print a tree of nodes. This is like render_tree, except it prints
lines directly instead of creating a string representation in memory,
so that huge trees can be printed.
root - the root node of the tree
child_func - the function called to get the children of a node
prune - don't visit the same node twice
showtags - print status information to the left of each node line
margin - the format of the left margin to use for children of root.
1 results in a pipe, and 0 results in no pipe.
visited - a dictionary of visited nodes in the current branch if not prune,
or in the whole tree if prune.
"""
rname = str(root)
# Initialize 'visited' dict, if required
if visited is None:
visited = {}
if showtags:
if showtags == 2:
legend = (' E = exists\n' +
' R = exists in repository only\n' +
' b = implicit builder\n' +
' B = explicit builder\n' +
' S = side effect\n' +
' P = precious\n' +
' A = always build\n' +
' C = current\n' +
' N = no clean\n' +
' H = no cache\n' +
'\n')
sys.stdout.write(unicode(legend))
tags = ['[']
tags.append(' E'[IDX(root.exists())])
tags.append(' R'[IDX(root.rexists() and not root.exists())])
tags.append(' BbB'[[0,1][IDX(root.has_explicit_builder())] +
[0,2][IDX(root.has_builder())]])
tags.append(' S'[IDX(root.side_effect)])
tags.append(' P'[IDX(root.precious)])
tags.append(' A'[IDX(root.always_build)])
tags.append(' C'[IDX(root.is_up_to_date())])
tags.append(' N'[IDX(root.noclean)])
tags.append(' H'[IDX(root.nocache)])
tags.append(']')
else:
tags = []
def MMM(m):
return [" ","| "][m]
margins = list(map(MMM, margin[:-1]))
children = child_func(root)
if prune and rname in visited and children:
sys.stdout.write(''.join(tags + margins + ['+-[', rname, ']']) + u'\n')
return
sys.stdout.write(''.join(tags + margins + ['+-', rname]) + u'\n')
visited[rname] = 1
if children:
margin.append(1)
idx = IDX(showtags)
for C in children[:-1]:
print_tree(C, child_func, prune, idx, margin, visited)
margin[-1] = 0
print_tree(children[-1], child_func, prune, idx, margin, visited)
margin.pop()
# Functions for deciding if things are like various types, mainly to
# handle UserDict, UserList and UserString like their underlying types.
#
# Yes, all of this manual testing breaks polymorphism, and the real
# Pythonic way to do all of this would be to just try it and handle the
# exception, but handling the exception when it's not the right type is
# often too slow.
# We are using the following trick to speed up these
# functions. Default arguments are used to take a snapshot of
# the global functions and constants used by these functions. This
# transforms accesses to global variable into local variables
# accesses (i.e. LOAD_FAST instead of LOAD_GLOBAL).
DictTypes = (dict, UserDict)
ListTypes = (list, UserList)
SequenceTypes = (list, tuple, UserList)
# Note that profiling data shows a speed-up when comparing
# explicitly with str and unicode instead of simply comparing
# with basestring. (at least on Python 2.5.1)
StringTypes = (str, unicode, UserString)
# Empirically, it is faster to check explicitly for str and
# unicode than for basestring.
BaseStringTypes = (str, unicode)
def is_Dict(obj, isinstance=isinstance, DictTypes=DictTypes):
return isinstance(obj, DictTypes)
def is_List(obj, isinstance=isinstance, ListTypes=ListTypes):
return isinstance(obj, ListTypes)
def is_Sequence(obj, isinstance=isinstance, SequenceTypes=SequenceTypes):
return isinstance(obj, SequenceTypes)
def is_Tuple(obj, isinstance=isinstance, tuple=tuple):
return isinstance(obj, tuple)
def is_String(obj, isinstance=isinstance, StringTypes=StringTypes):
return isinstance(obj, StringTypes)
def is_Scalar(obj, isinstance=isinstance, StringTypes=StringTypes, SequenceTypes=SequenceTypes):
# Profiling shows that there is an impressive speed-up of 2x
# when explicitly checking for strings instead of just not
# sequence when the argument (i.e. obj) is already a string.
# But, if obj is a not string then it is twice as fast to
# check only for 'not sequence'. The following code therefore
# assumes that the obj argument is a string most of the time.
return isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes)
def do_flatten(sequence, result, isinstance=isinstance,
StringTypes=StringTypes, SequenceTypes=SequenceTypes):
for item in sequence:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
def flatten(obj, isinstance=isinstance, StringTypes=StringTypes,
SequenceTypes=SequenceTypes, do_flatten=do_flatten):
"""Flatten a sequence to a non-nested list.
Flatten() converts either a single scalar or a nested sequence
to a non-nested list. Note that flatten() considers strings
to be scalars instead of sequences like Python would.
"""
if isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes):
return [obj]
result = []
for item in obj:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
return result
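# Behavior sketch for flatten(): strings count as scalars, nested
# sequences (including tuples) are expanded recursively:
#
#   flatten('abc')                  # -> ['abc']
#   flatten([1, [2, ('a', 'b')]])   # -> [1, 2, 'a', 'b']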
def flatten_sequence(sequence, isinstance=isinstance, StringTypes=StringTypes,
SequenceTypes=SequenceTypes, do_flatten=do_flatten):
"""Flatten a sequence to a non-nested list.
Same as flatten(), but it does not handle the single scalar
case. This is slightly more efficient when one knows that
the sequence to flatten can not be a scalar.
"""
result = []
for item in sequence:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
return result
# Generic convert-to-string functions that abstract away whether or
# not the Python we're executing has Unicode support. The wrapper
# to_String_for_signature() will use a for_signature() method if the
# specified object has one.
#
def to_String(s,
isinstance=isinstance, str=str,
UserString=UserString, BaseStringTypes=BaseStringTypes):
if isinstance(s,BaseStringTypes):
# Early out when already a string!
return s
elif isinstance(s, UserString):
# s.data can only be either a unicode or a regular
# string. Please see the UserString initializer.
return s.data
else:
return str(s)
def to_String_for_subst(s,
isinstance=isinstance, str=str, to_String=to_String,
BaseStringTypes=BaseStringTypes, SequenceTypes=SequenceTypes,
UserString=UserString):
# Note that the test cases are sorted by order of probability.
if isinstance(s, BaseStringTypes):
return s
elif isinstance(s, SequenceTypes):
l = []
for e in s:
l.append(to_String_for_subst(e))
        return ' '.join(l)
elif isinstance(s, UserString):
# s.data can only be either a unicode or a regular
# string. Please see the UserString initializer.
return s.data
else:
return str(s)
def to_String_for_signature(obj, to_String_for_subst=to_String_for_subst,
AttributeError=AttributeError):
try:
f = obj.for_signature
except AttributeError:
return to_String_for_subst(obj)
else:
return f()
# The SCons "semi-deep" copy.
#
# This makes separate copies of lists (including UserList objects)
# dictionaries (including UserDict objects) and tuples, but just copies
# references to anything else it finds.
#
# A special case is any object that has a __semi_deepcopy__() method,
# which we invoke to create the copy. Currently only used by
# BuilderDict to actually prevent the copy operation (as invalid on that object).
#
# The dispatch table approach used here is a direct rip-off from the
# normal Python copy module.
_semi_deepcopy_dispatch = d = {}
def semi_deepcopy_dict(x, exclude = [] ):
copy = {}
for key, val in x.items():
# The regular Python copy.deepcopy() also deepcopies the key,
# as follows:
#
# copy[semi_deepcopy(key)] = semi_deepcopy(val)
#
# Doesn't seem like we need to, but we'll comment it just in case.
if key not in exclude:
copy[key] = semi_deepcopy(val)
return copy
d[dict] = semi_deepcopy_dict
def _semi_deepcopy_list(x):
return list(map(semi_deepcopy, x))
d[list] = _semi_deepcopy_list
def _semi_deepcopy_tuple(x):
return tuple(map(semi_deepcopy, x))
d[tuple] = _semi_deepcopy_tuple
def semi_deepcopy(x):
copier = _semi_deepcopy_dispatch.get(type(x))
if copier:
return copier(x)
else:
if hasattr(x, '__semi_deepcopy__') and callable(x.__semi_deepcopy__):
return x.__semi_deepcopy__()
elif isinstance(x, UserDict):
return x.__class__(semi_deepcopy_dict(x))
elif isinstance(x, UserList):
return x.__class__(_semi_deepcopy_list(x))
return x
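# Behavior sketch for semi_deepcopy() (illustrative values): container
# types are copied, anything else is shared by reference:
#
#   env = {'CPPPATH': ['include'], 'obj': object()}
#   c = semi_deepcopy(env)
#   c['CPPPATH'].append('src')   # env['CPPPATH'] is unchanged
#   c['obj'] is env['obj']       # -> True: non-container, reference copied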
class Proxy(object):
"""A simple generic Proxy class, forwarding all calls to
subject. So, for the benefit of the python newbie, what does
this really mean? Well, it means that you can take an object, let's
call it 'objA', and wrap it in this Proxy class, with a statement
    like this:
        proxyObj = Proxy(objA)
    Then, if in the future, you do something like this:
        x = proxyObj.var1
    since Proxy does not have a 'var1' attribute (but presumably objA does),
    the request actually is equivalent to saying:
x = objA.var1
Inherit from this class to create a Proxy.
Note that, with new-style classes, this does *not* work transparently
for Proxy subclasses that use special .__*__() method names, because
those names are now bound to the class, not the individual instances.
You now need to know in advance which .__*__() method names you want
to pass on to the underlying Proxy object, and specifically delegate
their calls like this:
class Foo(Proxy):
__str__ = Delegate('__str__')
"""
def __init__(self, subject):
"""Wrap an object as a Proxy object"""
self._subject = subject
def __getattr__(self, name):
"""Retrieve an attribute from the wrapped object. If the named
attribute doesn't exist, AttributeError is raised"""
return getattr(self._subject, name)
def get(self):
"""Retrieve the entire wrapped object"""
return self._subject
def __cmp__(self, other):
if issubclass(other.__class__, self._subject.__class__):
return cmp(self._subject, other)
return cmp(self.__dict__, other.__dict__)
class Delegate(object):
"""A Python Descriptor class that delegates attribute fetches
to an underlying wrapped subject of a Proxy. Typical use:
class Foo(Proxy):
__str__ = Delegate('__str__')
"""
def __init__(self, attribute):
self.attribute = attribute
def __get__(self, obj, cls):
if isinstance(obj, cls):
return getattr(obj._subject, self.attribute)
else:
return self
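# Minimal combined sketch of Proxy and Delegate (illustrative): normal
# attribute fetches go through Proxy.__getattr__, while special method
# names must be delegated explicitly:
#
#   class Wrapped(Proxy):
#       __str__ = Delegate('__str__')
#
#   w = Wrapped(42)
#   w.bit_length()   # -> 6, forwarded by __getattr__
#   str(w)           # -> '42', forwarded by the Delegate descriptor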
# attempt to load the windows registry module:
can_read_reg = 0
try:
import winreg
can_read_reg = 1
hkey_mod = winreg
RegOpenKeyEx = winreg.OpenKeyEx
RegEnumKey = winreg.EnumKey
RegEnumValue = winreg.EnumValue
RegQueryValueEx = winreg.QueryValueEx
RegError = winreg.error
except ImportError:
try:
import win32api
import win32con
can_read_reg = 1
hkey_mod = win32con
RegOpenKeyEx = win32api.RegOpenKeyEx
RegEnumKey = win32api.RegEnumKey
RegEnumValue = win32api.RegEnumValue
RegQueryValueEx = win32api.RegQueryValueEx
RegError = win32api.error
except ImportError:
class _NoError(Exception):
pass
RegError = _NoError
WinError = None
# Make sure we have a definition of WindowsError so we can
# run platform-independent tests of Windows functionality on
# platforms other than Windows. (WindowsError is, in fact, an
# OSError subclass on Windows.)
class PlainWindowsError(OSError):
pass
try:
WinError = WindowsError
except NameError:
WinError = PlainWindowsError
if can_read_reg:
HKEY_CLASSES_ROOT = hkey_mod.HKEY_CLASSES_ROOT
HKEY_LOCAL_MACHINE = hkey_mod.HKEY_LOCAL_MACHINE
HKEY_CURRENT_USER = hkey_mod.HKEY_CURRENT_USER
HKEY_USERS = hkey_mod.HKEY_USERS
def RegGetValue(root, key):
"""This utility function returns a value in the registry
without having to open the key first. Only available on
Windows platforms with a version of Python that can read the
registry. Returns the same thing as
SCons.Util.RegQueryValueEx, except you just specify the entire
path to the value, and don't have to bother opening the key
first. So:
Instead of:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
r'SOFTWARE\Microsoft\Windows\CurrentVersion')
out = SCons.Util.RegQueryValueEx(k,
'ProgramFilesDir')
You can write:
out = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE,
r'SOFTWARE\Microsoft\Windows\CurrentVersion\ProgramFilesDir')
"""
# I would use os.path.split here, but it's not a filesystem
# path...
p = key.rfind('\\') + 1
keyp = key[:p-1] # -1 to omit trailing slash
val = key[p:]
k = RegOpenKeyEx(root, keyp)
return RegQueryValueEx(k,val)
else:
HKEY_CLASSES_ROOT = None
HKEY_LOCAL_MACHINE = None
HKEY_CURRENT_USER = None
HKEY_USERS = None
def RegGetValue(root, key):
raise WinError
def RegOpenKeyEx(root, key):
raise WinError
if sys.platform == 'win32':
def WhereIs(file, path=None, pathext=None, reject=[]):
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = path.split(os.pathsep)
if pathext is None:
try:
pathext = os.environ['PATHEXT']
except KeyError:
pathext = '.COM;.EXE;.BAT;.CMD'
if is_String(pathext):
pathext = pathext.split(os.pathsep)
for ext in pathext:
if ext.lower() == file[-len(ext):].lower():
pathext = ['']
break
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
try:
reject.index(fext)
except ValueError:
return os.path.normpath(fext)
continue
return None
elif os.name == 'os2':
def WhereIs(file, path=None, pathext=None, reject=[]):
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = path.split(os.pathsep)
if pathext is None:
pathext = ['.exe', '.cmd']
for ext in pathext:
if ext.lower() == file[-len(ext):].lower():
pathext = ['']
break
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
try:
reject.index(fext)
except ValueError:
return os.path.normpath(fext)
continue
return None
else:
def WhereIs(file, path=None, pathext=None, reject=[]):
import stat
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = path.split(os.pathsep)
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for d in path:
f = os.path.join(d, file)
if os.path.isfile(f):
try:
st = os.stat(f)
except OSError:
# os.stat() raises OSError, not IOError if the file
# doesn't exist, so in this case we let IOError get
# raised so as to not mask possibly serious disk or
# network issues.
continue
if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
try:
reject.index(f)
except ValueError:
return os.path.normpath(f)
continue
return None
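# Usage sketch for WhereIs() (results depend on the local filesystem;
# the paths shown are illustrative):
#
#   WhereIs('python')                    # e.g. '/usr/bin/python', or None
#   WhereIs('python', path='/opt/bin')   # search only the given path
#   WhereIs('python', reject=['/usr/bin/python'])   # skip known bad hits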
def PrependPath(oldpath, newpath, sep = os.pathsep,
delete_existing=1, canonicalize=None):
"""This prepends newpath elements to the given oldpath. Will only
add any particular path once (leaving the first one it encounters
and ignoring the rest, to preserve path order), and will
os.path.normpath and os.path.normcase all paths to help assure
this. This can also handle the case where the given old path
variable is a list instead of a string, in which case a list will
be returned instead of a string.
Example:
Old Path: "/foo/bar:/foo"
New Path: "/biz/boom:/foo"
Result: "/biz/boom:/foo:/foo/bar"
If delete_existing is 0, then adding a path that exists will
not move it to the beginning; it will stay where it is in the
list.
If canonicalize is not None, it is applied to each element of
newpath before use.
"""
orig = oldpath
is_list = 1
paths = orig
if not is_List(orig) and not is_Tuple(orig):
paths = paths.split(sep)
is_list = 0
if is_String(newpath):
newpaths = newpath.split(sep)
elif not is_List(newpath) and not is_Tuple(newpath):
newpaths = [ newpath ] # might be a Dir
else:
newpaths = newpath
if canonicalize:
newpaths=list(map(canonicalize, newpaths))
if not delete_existing:
# First uniquify the old paths, making sure to
# preserve the first instance (in Unix/Linux,
# the first one wins), and remembering them in normpaths.
# Then insert the new paths at the head of the list
# if they're not already in the normpaths list.
result = []
normpaths = []
for path in paths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.append(path)
normpaths.append(normpath)
newpaths.reverse() # since we're inserting at the head
for path in newpaths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.insert(0, path)
normpaths.append(normpath)
paths = result
else:
newpaths = newpaths + paths # prepend new paths
normpaths = []
paths = []
# now we add them only if they are unique
for path in newpaths:
normpath = os.path.normpath(os.path.normcase(path))
if path and not normpath in normpaths:
paths.append(path)
normpaths.append(normpath)
if is_list:
return paths
else:
return sep.join(paths)
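# Additional behavior sketch (assuming a POSIX ':' separator): with
# delete_existing=0 an element that is already present keeps its
# position instead of moving to the front:
#
#   PrependPath("/foo/bar:/foo", "/biz/boom:/foo", delete_existing=0)
#   # -> "/biz/boom:/foo/bar:/foo"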
def AppendPath(oldpath, newpath, sep = os.pathsep,
delete_existing=1, canonicalize=None):
"""This appends new path elements to the given old path. Will
only add any particular path once (leaving the last one it
encounters and ignoring the rest, to preserve path order), and
will os.path.normpath and os.path.normcase all paths to help
assure this. This can also handle the case where the given old
path variable is a list instead of a string, in which case a list
will be returned instead of a string.
Example:
Old Path: "/foo/bar:/foo"
New Path: "/biz/boom:/foo"
Result: "/foo/bar:/biz/boom:/foo"
If delete_existing is 0, then adding a path that exists
will not move it to the end; it will stay where it is in the list.
If canonicalize is not None, it is applied to each element of
newpath before use.
"""
orig = oldpath
is_list = 1
paths = orig
if not is_List(orig) and not is_Tuple(orig):
paths = paths.split(sep)
is_list = 0
if is_String(newpath):
newpaths = newpath.split(sep)
elif not is_List(newpath) and not is_Tuple(newpath):
newpaths = [ newpath ] # might be a Dir
else:
newpaths = newpath
if canonicalize:
newpaths=list(map(canonicalize, newpaths))
if not delete_existing:
# add old paths to result, then
# add new paths if not already present
# (I thought about using a dict for normpaths for speed,
# but it's not clear hashing the strings would be faster
# than linear searching these typically short lists.)
result = []
normpaths = []
for path in paths:
if not path:
continue
result.append(path)
normpaths.append(os.path.normpath(os.path.normcase(path)))
for path in newpaths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.append(path)
normpaths.append(normpath)
paths = result
else:
# start w/ new paths, add old ones if not present,
# then reverse.
newpaths = paths + newpaths # append new paths
newpaths.reverse()
normpaths = []
paths = []
# now we add them only if they are unique
for path in newpaths:
normpath = os.path.normpath(os.path.normcase(path))
if path and not normpath in normpaths:
paths.append(path)
normpaths.append(normpath)
paths.reverse()
if is_list:
return paths
else:
return sep.join(paths)
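# Additional behavior sketch (assuming a POSIX ':' separator): with
# delete_existing=0 an existing element keeps its position instead of
# moving to the end:
#
#   AppendPath("/foo/bar:/foo", "/biz/boom:/foo", delete_existing=0)
#   # -> "/foo/bar:/foo:/biz/boom"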
def AddPathIfNotExists(env_dict, key, path, sep=os.pathsep):
"""This function will take 'key' out of the dictionary
'env_dict', then add the path 'path' to that key if it is not
already there. This treats the value of env_dict[key] as if it
has a similar format to the PATH variable...a list of paths
separated by tokens. The 'path' will get added to the list if it
is not already there."""
try:
is_list = 1
paths = env_dict[key]
if not is_List(env_dict[key]):
paths = paths.split(sep)
is_list = 0
if os.path.normcase(path) not in list(map(os.path.normcase, paths)):
paths = [ path ] + paths
if is_list:
env_dict[key] = paths
else:
env_dict[key] = sep.join(paths)
except KeyError:
env_dict[key] = path
if sys.platform == 'cygwin':
def get_native_path(path):
"""Transforms an absolute path into a native path for the system. In
Cygwin, this converts from a Cygwin path to a Windows one."""
return os.popen('cygpath -w ' + path).read().replace('\n', '')
else:
def get_native_path(path):
"""Transforms an absolute path into a native path for the system.
Non-Cygwin version, just leave the path alone."""
return path
display = DisplayEngine()
def Split(arg):
if is_List(arg) or is_Tuple(arg):
return arg
elif is_String(arg):
return arg.split()
else:
return [arg]
class CLVar(UserList):
"""A class for command-line construction variables.
This is a list that uses Split() to split an initial string along
white-space arguments, and similarly to split any strings that get
added. This allows us to Do the Right Thing with Append() and
Prepend() (as well as straight Python foo = env['VAR'] + 'arg1
arg2') regardless of whether a user adds a list or a string to a
command-line construction variable.
"""
def __init__(self, seq = []):
UserList.__init__(self, Split(seq))
def __add__(self, other):
return UserList.__add__(self, CLVar(other))
def __radd__(self, other):
return UserList.__radd__(self, CLVar(other))
def __coerce__(self, other):
return (self, CLVar(other))
def __str__(self):
return ' '.join(self.data)
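# Usage sketch for CLVar: strings are split on whitespace whenever they
# are added, so lists and strings mix freely:
#
#   v = CLVar('a b') + 'c d'
#   v.data    # -> ['a', 'b', 'c', 'd']
#   str(v)    # -> 'a b c d'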
# A dictionary that preserves the order in which items are added.
# Submitted by David Benjamin to ActiveState's Python Cookbook web site:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
# Including fixes/enhancements from the follow-on discussions.
class OrderedDict(UserDict):
def __init__(self, dict = None):
self._keys = []
UserDict.__init__(self, dict)
def __delitem__(self, key):
UserDict.__delitem__(self, key)
self._keys.remove(key)
def __setitem__(self, key, item):
UserDict.__setitem__(self, key, item)
if key not in self._keys: self._keys.append(key)
def clear(self):
UserDict.clear(self)
self._keys = []
def copy(self):
dict = OrderedDict()
dict.update(self)
return dict
def items(self):
return list(zip(self._keys, list(self.values())))
def keys(self):
return self._keys[:]
def popitem(self):
try:
key = self._keys[-1]
except IndexError:
raise KeyError('dictionary is empty')
val = self[key]
del self[key]
return (key, val)
def setdefault(self, key, failobj = None):
UserDict.setdefault(self, key, failobj)
if key not in self._keys: self._keys.append(key)
def update(self, dict):
for (key, val) in dict.items():
self.__setitem__(key, val)
def values(self):
return list(map(self.get, self._keys))
class Selector(OrderedDict):
"""A callable ordered dictionary that maps file suffixes to
dictionary values. We preserve the order in which items are added
so that get_suffix() calls always return the first suffix added."""
def __call__(self, env, source, ext=None):
if ext is None:
try:
ext = source[0].get_suffix()
except IndexError:
ext = ""
try:
return self[ext]
except KeyError:
# Try to perform Environment substitution on the keys of
# the dictionary before giving up.
s_dict = {}
for (k,v) in self.items():
if k is not None:
s_k = env.subst(k)
if s_k in s_dict:
# We only raise an error when variables point
# to the same suffix. If one suffix is literal
# and a variable suffix contains this literal,
# the literal wins and we don't raise an error.
raise KeyError(s_dict[s_k][0], k, s_k)
s_dict[s_k] = (k,v)
try:
return s_dict[ext][1]
except KeyError:
try:
return self[None]
except KeyError:
return None
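# Usage sketch for Selector (env, node and the action values are
# hypothetical): keys are source suffixes, possibly containing
# construction variables that get substituted on a lookup miss:
#
#   sel = Selector({'.c': c_action, '$CXXFILESUFFIX': cxx_action})
#   sel(env, [node])   # -> c_action for 'foo.c', cxx_action for 'foo.cc'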
if sys.platform == 'cygwin':
# On Cygwin, os.path.normcase() lies, so just report back the
# fact that the underlying Windows OS is case-insensitive.
def case_sensitive_suffixes(s1, s2):
return 0
else:
def case_sensitive_suffixes(s1, s2):
return (os.path.normcase(s1) != os.path.normcase(s2))
def adjustixes(fname, pre, suf, ensure_suffix=False):
if pre:
path, fn = os.path.split(os.path.normpath(fname))
if fn[:len(pre)] != pre:
fname = os.path.join(path, pre + fn)
# Only append a suffix if the suffix we're going to add isn't already
# there, and if either we've been asked to ensure the specific suffix
# is present or there's no suffix on it at all.
if suf and fname[-len(suf):] != suf and \
(ensure_suffix or not splitext(fname)[1]):
fname = fname + suf
return fname
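# Behavior sketch for adjustixes():
#
#   adjustixes('foo', 'lib', '.a')      # -> 'libfoo.a'
#   adjustixes('foo.o', 'lib', '.a')    # -> 'libfoo.o' (already suffixed)
#   adjustixes('foo.o', 'lib', '.a', ensure_suffix=True)
#                                       # -> 'libfoo.o.a'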
# From Tim Peters,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# (Also in the printed Python Cookbook.)
def unique(s):
"""Return a list of the elements in s, but without duplicates.
For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
unique("abcabc") some permutation of ["a", "b", "c"], and
unique(([1, 2], [2, 3], [1, 2])) some permutation of
[[2, 3], [1, 2]].
For best speed, all sequence elements should be hashable. Then
unique() will usually work in linear time.
If not possible, the sequence elements should enjoy a total
ordering, and if list(s).sort() doesn't raise TypeError it's
assumed that they do enjoy a total ordering. Then unique() will
usually work in O(N*log2(N)) time.
If that's not possible either, the sequence elements must support
equality-testing. Then unique() will usually work in quadratic
time.
"""
n = len(s)
if n == 0:
return []
# Try using a dict first, as that's the fastest and will usually
# work. If it doesn't work, it will usually fail quickly, so it
# usually doesn't cost much to *try* it. It requires that all the
# sequence elements be hashable, and support equality comparison.
u = {}
try:
for x in s:
u[x] = 1
except TypeError:
pass # move on to the next method
else:
return list(u.keys())
del u
# We can't hash all the elements. Second fastest is to sort,
# which brings the equal elements together; then duplicates are
# easy to weed out in a single pass.
# NOTE: Python's list.sort() was designed to be efficient in the
# presence of many duplicate elements. This isn't true of all
# sort functions in all languages or libraries, so this approach
# is more effective in Python than it may be elsewhere.
try:
t = sorted(s)
except TypeError:
pass # move on to the next method
else:
assert n > 0
last = t[0]
lasti = i = 1
while i < n:
if t[i] != last:
t[lasti] = last = t[i]
lasti = lasti + 1
i = i + 1
return t[:lasti]
del t
# Brute force is all that's left.
u = []
for x in s:
if x not in u:
u.append(x)
return u
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
# A more efficient implementation of Alex's uniquer(), this avoids the
# idfun() argument and function-call overhead by assuming that all
# items in the sequence are hashable.
def uniquer_hashables(seq):
seen = {}
result = []
for item in seq:
#if not item in seen:
if item not in seen:
seen[item] = 1
result.append(item)
return result
# Recipe 19.11 "Reading Lines with Continuation Characters",
# by Alex Martelli, straight from the Python CookBook (2nd edition).
def logical_lines(physical_lines, joiner=''.join):
logical_line = []
for line in physical_lines:
stripped = line.rstrip()
if stripped.endswith('\\'):
# a line which continues w/the next physical line
logical_line.append(stripped[:-1])
else:
# a line which does not continue, end of logical line
logical_line.append(line)
yield joiner(logical_line)
logical_line = []
if logical_line:
# end of sequence implies end of last logical line
yield joiner(logical_line)
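# Usage sketch for logical_lines(): backslash-continued physical lines
# are joined into a single logical line:
#
#   physical = ['foo \\\n', 'bar\n', 'baz\n']
#   list(logical_lines(physical))   # -> ['foo bar\n', 'baz\n']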
class LogicalLines(object):
""" Wrapper class for the logical_lines method.
Allows us to read all "logical" lines at once from a
given file object.
"""
def __init__(self, fileobj):
self.fileobj = fileobj
def readlines(self):
result = [l for l in logical_lines(self.fileobj)]
return result
class UniqueList(UserList):
def __init__(self, seq = []):
UserList.__init__(self, seq)
self.unique = True
def __make_unique(self):
if not self.unique:
self.data = uniquer_hashables(self.data)
self.unique = True
def __lt__(self, other):
self.__make_unique()
return UserList.__lt__(self, other)
def __le__(self, other):
self.__make_unique()
return UserList.__le__(self, other)
def __eq__(self, other):
self.__make_unique()
return UserList.__eq__(self, other)
def __ne__(self, other):
self.__make_unique()
return UserList.__ne__(self, other)
def __gt__(self, other):
self.__make_unique()
return UserList.__gt__(self, other)
def __ge__(self, other):
self.__make_unique()
return UserList.__ge__(self, other)
def __cmp__(self, other):
self.__make_unique()
return UserList.__cmp__(self, other)
def __len__(self):
self.__make_unique()
return UserList.__len__(self)
def __getitem__(self, i):
self.__make_unique()
return UserList.__getitem__(self, i)
def __setitem__(self, i, item):
UserList.__setitem__(self, i, item)
self.unique = False
def __getslice__(self, i, j):
self.__make_unique()
return UserList.__getslice__(self, i, j)
def __setslice__(self, i, j, other):
UserList.__setslice__(self, i, j, other)
self.unique = False
def __add__(self, other):
result = UserList.__add__(self, other)
result.unique = False
return result
def __radd__(self, other):
result = UserList.__radd__(self, other)
result.unique = False
return result
def __iadd__(self, other):
result = UserList.__iadd__(self, other)
result.unique = False
return result
def __mul__(self, other):
result = UserList.__mul__(self, other)
result.unique = False
return result
def __rmul__(self, other):
result = UserList.__rmul__(self, other)
result.unique = False
return result
def __imul__(self, other):
result = UserList.__imul__(self, other)
result.unique = False
return result
def append(self, item):
UserList.append(self, item)
self.unique = False
    def insert(self, i, item):
        UserList.insert(self, i, item)
self.unique = False
def count(self, item):
self.__make_unique()
return UserList.count(self, item)
def index(self, item):
self.__make_unique()
return UserList.index(self, item)
def reverse(self):
self.__make_unique()
UserList.reverse(self)
def sort(self, *args, **kwds):
self.__make_unique()
return UserList.sort(self, *args, **kwds)
def extend(self, other):
UserList.extend(self, other)
self.unique = False
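# Behavior sketch for UniqueList: mutating operations merely mark the
# list dirty; duplicates are weeded out lazily on the next read access:
#
#   u = UniqueList([1, 2])
#   u.append(1)   # u.data is now [1, 2, 1]; u.unique is False
#   len(u)        # -> 2; the duplicate is removed here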
class Unbuffered(object):
"""
A proxy class that wraps a file object, flushing after every write,
and delegating everything else to the wrapped object.
"""
def __init__(self, file):
self.file = file
self.softspace = 0 ## backward compatibility; not supported in Py3k
def write(self, arg):
try:
self.file.write(arg)
self.file.flush()
except IOError:
# Stdout might be connected to a pipe that has been closed
# by now. The most likely reason for the pipe being closed
            # is that the user has pressed ctrl-c. If this is the case,
            # then SCons is currently shutting down. We therefore ignore
            # IOError's here so that SCons can continue and shut down
# properly so that the .sconsign is correctly written
# before SCons exits.
pass
def __getattr__(self, attr):
return getattr(self.file, attr)
def make_path_relative(path):
""" makes an absolute path name to a relative pathname.
"""
if os.path.isabs(path):
drive_s,path = os.path.splitdrive(path)
import re
if not drive_s:
path=re.compile("/*(.*)").findall(path)[0]
else:
path=path[1:]
assert( not os.path.isabs( path ) ), path
return path
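# Behavior sketch for make_path_relative() (POSIX-style paths):
#
#   make_path_relative('/usr/local/lib')   # -> 'usr/local/lib'
#   make_path_relative('lib/foo')          # -> 'lib/foo' (already relative)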
# The original idea for AddMethod() and RenameFunction() come from the
# following post to the ActiveState Python Cookbook:
#
# ASPN: Python Cookbook : Install bound methods in an instance
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/223613
#
# That code was a little fragile, though, so the following changes
# have been wrung on it:
#
# * Switched the installmethod() "object" and "function" arguments,
# so the order reflects that the left-hand side is the thing being
# "assigned to" and the right-hand side is the value being assigned.
#
# * Changed explicit type-checking to the "try: klass = object.__class__"
# block in installmethod() below so that it still works with the
# old-style classes that SCons uses.
#
# * Replaced the by-hand creation of methods and functions with use of
# the "new" module, as alluded to in Alex Martelli's response to the
# following Cookbook post:
#
# ASPN: Python Cookbook : Dynamically added methods to a class
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81732
def AddMethod(obj, function, name=None):
"""
Adds either a bound method to an instance or an unbound method to
    a class. If name is omitted the name of the specified function
is used by default.
Example:
a = A()
def f(self, x, y):
self.z = x + y
AddMethod(f, A, "add")
a.add(2, 4)
print a.z
      AddMethod(a, lambda self, i: self.l[i], "listIndex")
print a.listIndex(5)
"""
if name is None:
name = function.func_name
else:
function = RenameFunction(function, name)
if hasattr(obj, '__class__') and obj.__class__ is not type:
# "obj" is an instance, so it gets a bound method.
setattr(obj, name, MethodType(function, obj, obj.__class__))
else:
# "obj" is a class, so it gets an unbound method.
setattr(obj, name, MethodType(function, None, obj))
def RenameFunction(function, name):
"""
Returns a function identical to the specified function, but with
the specified name.
"""
return FunctionType(function.func_code,
function.func_globals,
name,
function.func_defaults)
md5 = False
def MD5signature(s):
return str(s)
def MD5filesignature(fname, chunksize=65536):
f = open(fname, "rb")
result = f.read()
f.close()
return result
try:
import hashlib
except ImportError:
pass
else:
if hasattr(hashlib, 'md5'):
md5 = True
def MD5signature(s):
m = hashlib.md5()
m.update(str(s))
return m.hexdigest()
def MD5filesignature(fname, chunksize=65536):
m = hashlib.md5()
f = open(fname, "rb")
while True:
blck = f.read(chunksize)
if not blck:
break
m.update(str(blck))
f.close()
return m.hexdigest()
def MD5collect(signatures):
"""
Collects a list of signatures into an aggregate signature.
signatures - a list of signatures
returns - the aggregate signature
"""
if len(signatures) == 1:
return signatures[0]
else:
return MD5signature(', '.join(signatures))
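# Behavior sketch for MD5collect(): a single signature passes through
# unchanged; several signatures are joined and re-hashed:
#
#   MD5collect(['abc'])          # -> 'abc'
#   MD5collect(['abc', 'def'])   # -> MD5signature('abc, def')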
def silent_intern(x):
"""
Perform sys.intern() on the passed argument and return the result.
If the input is ineligible (e.g. a unicode string) the original argument is
returned and no exception is thrown.
"""
try:
return sys.intern(x)
except TypeError:
return x
# From Dinu C. Gherman,
# Python Cookbook, second edition, recipe 6.17, p. 277.
# Also:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205
# ASPN: Python Cookbook: Null Object Design Pattern
#TODO??? class Null(object):
class Null(object):
""" Null objects always and reliably "do nothing." """
def __new__(cls, *args, **kwargs):
if not '_instance' in vars(cls):
cls._instance = super(Null, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __repr__(self):
return "Null(0x%08X)" % id(self)
def __nonzero__(self):
return False
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
return self
def __delattr__(self, name):
return self
class NullSeq(Null):
def __len__(self):
return 0
def __iter__(self):
return iter(())
def __getitem__(self, i):
return self
def __delitem__(self, i):
return self
def __setitem__(self, i, v):
return self
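# Usage sketch for Null: a Null object absorbs any interaction and
# returns itself, so arbitrarily chained accesses and calls never fail:
#
#   n = Null()
#   n.foo.bar('baz')   # -> the same Null singleton
#   bool(n)            # -> False (via __nonzero__ on Python 2)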
del __revision__
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
xiaohaidao007/pandoraBox-SDK-mt7620
|
staging_dir/host/lib/scons-2.5.0/SCons/Util.py
|
Python
|
gpl-2.0
| 50,268
|
[
"VisIt"
] |
b6df6501ec6a442a17a01626b23459e97f51635e8a172b97ddf4f7d5e90ac129
|
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
# pylint: skip-file
from __future__ import absolute_import, division, print_function
# Author: Jacob Kristhammar, 2010
import base64
import collections
import hashlib
import os
import struct
import salt.ext.tornado.escape as tornado_escape
import salt.ext.tornado.web as tornado_web
import zlib
from salt.ext.tornado.concurrent import TracebackFuture
from salt.ext.tornado.escape import utf8, native_str, to_unicode
from salt.ext.tornado import gen, httpclient, httputil
from salt.ext.tornado.ioloop import IOLoop, PeriodicCallback
from salt.ext.tornado.iostream import StreamClosedError
from salt.ext.tornado.log import gen_log, app_log
from salt.ext.tornado import simple_httpclient
from salt.ext.tornado.tcpclient import TCPClient
from salt.ext.tornado.util import _websocket_mask, PY3
if PY3:
    from urllib.parse import urlparse  # py3
    xrange = range
else:
    from urlparse import urlparse  # py2
class WebSocketError(Exception):
pass
class WebSocketClosedError(WebSocketError):
"""Raised by operations on a closed connection.
.. versionadded:: 3.2
"""
pass
class WebSocketHandler(tornado_web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
Custom upgrade response headers can be sent by overriding
`~tornado.web.RequestHandler.set_default_headers` or
`~tornado.web.RequestHandler.prepare`.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
    Here is an example WebSocket handler that echoes all received messages
back to the client:
.. testcode::
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened")
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
.. testoutput::
:hide:
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
`close()`. Likewise, your request handler class should implement
    the `open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from javascript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
If the application setting ``websocket_ping_interval`` has a non-zero
value, a ping will be sent periodically, and the connection will be
closed if a response is not received before the ``websocket_ping_timeout``.
Messages larger than the ``websocket_max_message_size`` application setting
(default 10MiB) will not be accepted.
.. versionchanged:: 4.5
Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and
``websocket_max_message_size``.
"""
def __init__(self, application, request, **kwargs):
super(WebSocketHandler, self).__init__(application, request, **kwargs)
self.ws_connection = None
self.close_code = None
self.close_reason = None
self.stream = None
self._on_close_called = False
@tornado_web.asynchronous
def get(self, *args, **kwargs):
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != 'websocket':
self.set_status(400)
log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Connection header should be upgrade.
# Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(lambda s: s.strip().lower(),
headers.get("Connection", "").split(","))
if 'upgrade' not in connection:
self.set_status(400)
log_msg = "\"Connection\" must be \"Upgrade\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.set_status(403)
log_msg = "Cross origin websockets not allowed"
self.finish(log_msg)
gen_log.debug(log_msg)
return
self.ws_connection = self.get_websocket_protocol()
if self.ws_connection:
self.ws_connection.accept_connection()
else:
self.set_status(426, "Upgrade Required")
self.set_header("Sec-WebSocket-Version", "7, 8, 13")
self.finish()
stream = None
@property
def ping_interval(self):
"""The interval for websocket keep-alive pings.
Set websocket_ping_interval = 0 to disable pings.
"""
return self.settings.get('websocket_ping_interval', None)
@property
def ping_timeout(self):
"""If no ping is received in this many seconds,
close the websocket connection (VPNs, etc. can fail to cleanly close ws connections).
Default is max of 3 pings or 30 seconds.
"""
return self.settings.get('websocket_ping_timeout', None)
@property
def max_message_size(self):
"""Maximum allowed message size.
If the remote peer sends a message larger than this, the connection
will be closed.
Default is 10MiB.
"""
return self.settings.get('websocket_max_message_size', None)
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
.. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control.
"""
if self.ws_connection is None:
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado_escape.json_encode(message)
return self.ws_connection.write_message(message, binary=binary)
def select_subprotocol(self, subprotocols):
"""Invoked when a new WebSocket requests specific subprotocols.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol. Failure to select a
subprotocol does not automatically abort the connection,
although clients may close the connection if none of their
proposed subprotocols was selected.
"""
return None
def get_compression_options(self):
"""Override to return compression options for the connection.
If this method returns None (the default), compression will
be disabled. If it returns a dict (even an empty one), it
will be enabled. The contents of the dict may be used to
control the following compression options:
``compression_level`` specifies the compression level.
``mem_level`` specifies the amount of memory used for the internal compression state.
These parameters are documented in details here:
https://docs.python.org/3.6/library/zlib.html#zlib.compressobj
.. versionadded:: 4.1
.. versionchanged:: 4.5
Added ``compression_level`` and ``mem_level``.
"""
# TODO: Add wbits option.
return None
def open(self, *args, **kwargs):
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
"""
pass
def on_message(self, message):
"""Handle incoming messages on the WebSocket
This method must be overridden.
.. versionchanged:: 4.5
``on_message`` can be a coroutine.
"""
raise NotImplementedError
def ping(self, data):
"""Send ping frame to the remote end."""
if self.ws_connection is None:
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
def on_pong(self, data):
"""Invoked when the response to a ping frame is received."""
pass
def on_ping(self, data):
"""Invoked when the a ping frame is received."""
pass
def on_close(self):
"""Invoked when the WebSocket is closed.
If the connection was closed cleanly and a status code or reason
phrase was supplied, these values will be available as the attributes
``self.close_code`` and ``self.close_reason``.
.. versionchanged:: 4.0
Added ``close_code`` and ``close_reason`` attributes.
"""
pass
def close(self, code=None, reason=None):
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
``code`` may be a numeric status code, taken from the values
defined in `RFC 6455 section 7.4.1
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
``reason`` may be a textual message about why the connection is
closing. These values are made available to the client, but are
not otherwise interpreted by the websocket protocol.
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.ws_connection:
self.ws_connection.close(code, reason)
self.ws_connection = None
def check_origin(self, origin):
"""Override to enable support for allowing alternate origins.
The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request. This
method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that
implement WebSockets support this header, and non-browser
clients do not have the same cross-site security concerns).
Should return True to accept the request or False to reject it.
By default, rejects all requests with an origin on a host other
than this one.
This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.
.. warning::
This is an important security measure; don't disable it
without understanding the security implications. In
particular, if your authentication is cookie-based, you
must either restrict the origins allowed by
``check_origin()`` or implement your own XSRF-like
protection for websocket connections. See `these
<https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
`articles
<https://devcenter.heroku.com/articles/websocket-security>`_
for more.
To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return true::
def check_origin(self, origin):
return True
To allow connections from any subdomain of your site, you might
do something like::
def check_origin(self, origin):
parsed_origin = urllib.parse.urlparse(origin)
return parsed_origin.netloc.endswith(".mydomain.com")
.. versionadded:: 4.0
"""
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = self.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
def set_nodelay(self, value):
"""Set the no-delay flag for this stream.
By default, small messages may be delayed and/or combined to minimize
the number of packets sent. This can sometimes cause 200-500ms delays
due to the interaction between Nagle's algorithm and TCP delayed
ACKs. To reduce this delay (at the expense of possibly increasing
bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
connection is established.
See `.BaseIOStream.set_nodelay` for additional details.
.. versionadded:: 3.1
"""
self.stream.set_nodelay(value)
def on_connection_close(self):
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
if not self._on_close_called:
self._on_close_called = True
self.on_close()
self._break_cycles()
def _break_cycles(self):
# WebSocketHandlers call finish() early, but we don't want to
# break up reference cycles (which makes it impossible to call
# self.render_string) until after we've really closed the
# connection (if it was established in the first place,
# indicated by status code 101).
if self.get_status() != 101 or self._on_close_called:
super(WebSocketHandler, self)._break_cycles()
def send_error(self, *args, **kwargs):
if self.stream is None:
super(WebSocketHandler, self).send_error(*args, **kwargs)
else:
# If we get an uncaught exception during the handshake,
# we have no choice but to abruptly close the connection.
# TODO: for uncaught exceptions after the handshake,
# we can close the connection more gracefully.
self.stream.close()
def get_websocket_protocol(self):
websocket_version = self.request.headers.get("Sec-WebSocket-Version")
if websocket_version in ("7", "8", "13"):
return WebSocketProtocol13(
self, compression_options=self.get_compression_options())
def _attach_stream(self):
self.stream = self.request.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
# disable non-WS methods
for method in ["write", "redirect", "set_header", "set_cookie",
"set_status", "flush", "finish"]:
setattr(self, method, _raise_not_supported_for_websockets)
def _raise_not_supported_for_websockets(*args, **kwargs):
raise RuntimeError("Method not supported for Web Sockets")
class WebSocketProtocol(object):
"""Base class for WebSocket protocol versions.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.stream = handler.stream
self.client_terminated = False
self.server_terminated = False
def _run_callback(self, callback, *args, **kwargs):
"""Runs the given callback with exception handling.
If the callback is a coroutine, returns its Future. On error, aborts the
websocket connection and returns None.
"""
try:
result = callback(*args, **kwargs)
except Exception:
app_log.error("Uncaught exception in %s",
getattr(self.request, 'path', None), exc_info=True)
self._abort()
else:
if result is not None:
result = gen.convert_yielded(result)
self.stream.io_loop.add_future(result, lambda f: f.result())
return result
def on_connection_close(self):
self._abort()
def _abort(self):
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
class _PerMessageDeflateCompressor(object):
def __init__(self, persistent, max_wbits, compression_options=None):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if compression_options is None or 'compression_level' not in compression_options:
self._compression_level = tornado_web.GZipContentEncoding.GZIP_LEVEL
else:
self._compression_level = compression_options['compression_level']
if compression_options is None or 'mem_level' not in compression_options:
self._mem_level = 8
else:
self._mem_level = compression_options['mem_level']
if persistent:
self._compressor = self._create_compressor()
else:
self._compressor = None
def _create_compressor(self):
return zlib.compressobj(self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level)
def compress(self, data):
compressor = self._compressor or self._create_compressor()
data = (compressor.compress(data) +
compressor.flush(zlib.Z_SYNC_FLUSH))
assert data.endswith(b'\x00\x00\xff\xff')
return data[:-4]
class _PerMessageDeflateDecompressor(object):
def __init__(self, persistent, max_wbits, compression_options=None):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._decompressor = self._create_decompressor()
else:
self._decompressor = None
def _create_decompressor(self):
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data):
decompressor = self._decompressor or self._create_decompressor()
return decompressor.decompress(data + b'\x00\x00\xff\xff')
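# Round-trip sketch of the permessage-deflate framing (RFC 7692)
# implemented by the two classes above: the compressor strips the
# 4-byte Z_SYNC_FLUSH tail and the decompressor re-appends it before
# inflating:
#
#   c = _PerMessageDeflateCompressor(persistent=False, max_wbits=None)
#   d = _PerMessageDeflateDecompressor(persistent=False, max_wbits=None)
#   d.decompress(c.compress(b'hello'))   # -> b'hello'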
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0f
def __init__(self, handler, mask_outgoing=False,
compression_options=None):
WebSocketProtocol.__init__(self, handler)
self.mask_outgoing = mask_outgoing
self._final_frame = False
self._frame_opcode = None
self._masked_frame = None
self._frame_mask = None
self._frame_length = None
self._fragmented_message_buffer = None
self._fragmented_message_opcode = None
self._waiting = None
self._compression_options = compression_options
self._decompressor = None
self._compressor = None
self._frame_compressed = None
# The total uncompressed size of all messages received or sent.
# Unicode messages are encoded to utf8.
# Only for testing; subject to change.
self._message_bytes_in = 0
self._message_bytes_out = 0
# The total size of all packets received or sent. Includes
# the effect of compression, frame overhead, and control frames.
self._wire_bytes_in = 0
self._wire_bytes_out = 0
self.ping_callback = None
self.last_ping = 0
self.last_pong = 0
def accept_connection(self):
try:
self._handle_websocket_headers()
except ValueError:
self.handler.set_status(400)
log_msg = "Missing/Invalid WebSocket headers"
self.handler.finish(log_msg)
gen_log.debug(log_msg)
return
try:
self._accept_connection()
except ValueError:
gen_log.debug("Malformed WebSocket request received",
exc_info=True)
self._abort()
return
def _handle_websocket_headers(self):
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: self.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key):
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
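    # Worked example from RFC 6455, section 1.3: the sample handshake key
    # "dGhlIHNhbXBsZSBub25jZQ==" must yield
    #
    #   compute_accept_value("dGhlIHNhbXBsZSBub25jZQ==")
    #   # -> 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='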
def _challenge_response(self):
return WebSocketProtocol13.compute_accept_value(
self.request.headers.get("Sec-Websocket-Key"))
def _accept_connection(self):
subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
subprotocols = [s.strip() for s in subprotocols.split(',')]
if subprotocols:
selected = self.handler.select_subprotocol(subprotocols)
if selected:
assert selected in subprotocols
self.handler.set_header("Sec-WebSocket-Protocol", selected)
extensions = self._parse_extensions_header(self.request.headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
# TODO: negotiate parameters if compression_options
# specifies limits.
self._create_compressors('server', ext[1], self._compression_options)
if ('client_max_window_bits' in ext[1] and
ext[1]['client_max_window_bits'] is None):
# Don't echo an offered client_max_window_bits
# parameter with no value.
del ext[1]['client_max_window_bits']
self.handler.set_header("Sec-WebSocket-Extensions",
httputil._encode_header(
'permessage-deflate', ext[1]))
break
self.handler.clear_header("Content-Type")
self.handler.set_status(101)
self.handler.set_header("Upgrade", "websocket")
self.handler.set_header("Connection", "Upgrade")
self.handler.set_header("Sec-WebSocket-Accept", self._challenge_response())
self.handler.finish()
self.handler._attach_stream()
self.stream = self.handler.stream
self.start_pinging()
self._run_callback(self.handler.open, *self.handler.open_args,
**self.handler.open_kwargs)
self._receive_frame()
def _parse_extensions_header(self, headers):
extensions = headers.get("Sec-WebSocket-Extensions", '')
if extensions:
return [httputil._parse_header(e.strip())
for e in extensions.split(',')]
return []
def _process_server_headers(self, key, headers):
"""Process the headers sent by the server to this client connection.
'key' is the websocket handshake challenge/response key.
"""
assert headers['Upgrade'].lower() == 'websocket'
assert headers['Connection'].lower() == 'upgrade'
accept = self.compute_accept_value(key)
assert headers['Sec-Websocket-Accept'] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
self._create_compressors('client', ext[1])
else:
raise ValueError("unsupported extension %r", ext)
def _get_compressor_options(self, side, agreed_parameters, compression_options=None):
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + '_no_context_takeover') not in agreed_parameters)
wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
if wbits_header is None:
options['max_wbits'] = zlib.MAX_WBITS
else:
options['max_wbits'] = int(wbits_header)
options['compression_options'] = compression_options
return options
def _create_compressors(self, side, agreed_parameters, compression_options=None):
# TODO: handle invalid parameters gracefully
allowed_keys = set(['server_no_context_takeover',
'client_no_context_takeover',
'server_max_window_bits',
'client_max_window_bits'])
for key in agreed_parameters:
if key not in allowed_keys:
raise ValueError("unsupported compression parameter %r" % key)
other_side = 'client' if (side == 'server') else 'server'
self._compressor = _PerMessageDeflateCompressor(
**self._get_compressor_options(side, agreed_parameters, compression_options))
self._decompressor = _PerMessageDeflateDecompressor(
**self._get_compressor_options(other_side, agreed_parameters, compression_options))
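# For reference, the RFC 6455 frame header that _write_frame builds below
# is laid out as:
#   byte 0: FIN (0x80) | RSV1-3 | 4-bit opcode
#   byte 1: MASK (0x80) | 7-bit payload length
#   length == 126 -> followed by a 16-bit big-endian extended length
#   length == 127 -> followed by a 64-bit big-endian extended length
#   if MASK is set -> followed by the 4-byte masking key, then the payload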
def _write_frame(self, fin, opcode, data, flags=0):
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
l = len(data)
if self.mask_outgoing:
mask_bit = 0x80
else:
mask_bit = 0
if l < 126:
frame += struct.pack("B", l | mask_bit)
elif l <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, l)
else:
frame += struct.pack("!BQ", 127 | mask_bit, l)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
try:
return self.stream.write(frame)
except StreamClosedError:
self._abort()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado_escape.utf8(message)
assert isinstance(message, bytes)
self._message_bytes_out += len(message)
flags = 0
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
return self._write_frame(True, opcode, message, flags=flags)
def write_ping(self, data):
"""Send ping frame."""
assert isinstance(data, bytes)
self._write_frame(True, 0x9, data)
def _receive_frame(self):
try:
self.stream.read_bytes(2, self._on_frame_start)
except StreamClosedError:
self._abort()
def _on_frame_start(self, data):
self._wire_bytes_in += len(data)
header, payloadlen = struct.unpack("BB", data)
self._final_frame = header & self.FIN
reserved_bits = header & self.RSV_MASK
self._frame_opcode = header & self.OPCODE_MASK
self._frame_opcode_is_control = self._frame_opcode & 0x8
if self._decompressor is not None and self._frame_opcode != 0:
self._frame_compressed = bool(reserved_bits & self.RSV1)
reserved_bits &= ~self.RSV1
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
self._masked_frame = bool(payloadlen & 0x80)
payloadlen = payloadlen & 0x7f
if self._frame_opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
try:
if payloadlen < 126:
self._frame_length = payloadlen
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self._read_frame_data(False)
elif payloadlen == 126:
self.stream.read_bytes(2, self._on_frame_length_16)
elif payloadlen == 127:
self.stream.read_bytes(8, self._on_frame_length_64)
except StreamClosedError:
self._abort()
def _read_frame_data(self, masked):
new_len = self._frame_length
if self._fragmented_message_buffer is not None:
new_len += len(self._fragmented_message_buffer)
if new_len > (self.handler.max_message_size or 10 * 1024 * 1024):
self.close(1009, "message too big")
return
self.stream.read_bytes(
self._frame_length,
self._on_masked_frame_data if masked else self._on_frame_data)
def _on_frame_length_16(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!H", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self._read_frame_data(False)
except StreamClosedError:
self._abort()
def _on_frame_length_64(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!Q", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self._read_frame_data(False)
except StreamClosedError:
self._abort()
def _on_masking_key(self, data):
self._wire_bytes_in += len(data)
self._frame_mask = data
try:
self._read_frame_data(True)
except StreamClosedError:
self._abort()
def _on_masked_frame_data(self, data):
# Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
self._on_frame_data(_websocket_mask(self._frame_mask, data))
def _on_frame_data(self, data):
handled_future = None
self._wire_bytes_in += len(data)
if self._frame_opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not self._final_frame:
# control frames must not be fragmented
self._abort()
return
opcode = self._frame_opcode
elif self._frame_opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += data
if self._final_frame:
opcode = self._fragmented_message_opcode
data = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if self._final_frame:
opcode = self._frame_opcode
else:
self._fragmented_message_opcode = self._frame_opcode
self._fragmented_message_buffer = data
if self._final_frame:
handled_future = self._handle_message(opcode, data)
if not self.client_terminated:
if handled_future:
# on_message is a coroutine, process more frames once it's done.
handled_future.add_done_callback(
lambda future: self._receive_frame())
else:
self._receive_frame()
def _handle_message(self, opcode, data):
"""Execute on_message, returning its Future if it is a coroutine."""
if self.client_terminated:
return
if self._frame_compressed:
data = self._decompressor.decompress(data)
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return
return self._run_callback(self.handler.on_message, decoded)
elif opcode == 0x2:
# Binary data
self._message_bytes_in += len(data)
return self._run_callback(self.handler.on_message, data)
elif opcode == 0x8:
# Close
self.client_terminated = True
if len(data) >= 2:
self.handler.close_code = struct.unpack('>H', data[:2])[0]
if len(data) > 2:
self.handler.close_reason = to_unicode(data[2:])
# Echo the received close code, if any (RFC 6455 section 5.5.1).
self.close(self.handler.close_code)
elif opcode == 0x9:
# Ping
self._write_frame(True, 0xA, data)
self._run_callback(self.handler.on_ping, data)
elif opcode == 0xA:
# Pong
self.last_pong = IOLoop.current().time()
return self._run_callback(self.handler.on_pong, data)
else:
self._abort()
def close(self, code=None, reason=None):
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
if code is None and reason is not None:
code = 1000 # "normal closure" status code
if code is None:
close_data = b''
else:
close_data = struct.pack('>H', code)
if reason is not None:
close_data += utf8(reason)
self._write_frame(True, 0x8, close_data)
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
self.stream.io_loop.time() + 5, self._abort)
@property
def ping_interval(self):
interval = self.handler.ping_interval
if interval is not None:
return interval
return 0
@property
def ping_timeout(self):
timeout = self.handler.ping_timeout
if timeout is not None:
return timeout
return max(3 * self.ping_interval, 30)
def start_pinging(self):
"""Start sending periodic pings to keep the connection alive"""
if self.ping_interval > 0:
self.last_ping = self.last_pong = IOLoop.current().time()
self.ping_callback = PeriodicCallback(
self.periodic_ping, self.ping_interval * 1000)
self.ping_callback.start()
def periodic_ping(self):
"""Send a ping to keep the websocket alive
Called periodically if the websocket_ping_interval is set and non-zero.
"""
if self.stream.closed() and self.ping_callback is not None:
self.ping_callback.stop()
return
# Check for timeout on pong. Make sure that we really have
# sent a recent ping in case the machine with both server and
# client has been suspended since the last ping.
now = IOLoop.current().time()
since_last_pong = now - self.last_pong
since_last_ping = now - self.last_ping
if (since_last_ping < 2 * self.ping_interval and
since_last_pong > self.ping_timeout):
self.close()
return
self.write_ping(b'')
self.last_ping = now
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
"""WebSocket client connection.
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
def __init__(self, io_loop, request, on_message_callback=None,
compression_options=None, ping_interval=None, ping_timeout=None,
max_message_size=None):
self.compression_options = compression_options
self.connect_future = TracebackFuture()
self.protocol = None
self.read_future = None
self.read_queue = collections.deque()
self.key = base64.b64encode(os.urandom(16))
self._on_message_callback = on_message_callback
self.close_code = self.close_reason = None
self.ping_interval = ping_interval
self.ping_timeout = ping_timeout
self.max_message_size = max_message_size
scheme, sep, rest = request.url.partition(':')
scheme = {'ws': 'http', 'wss': 'https'}[scheme]
request.url = scheme + sep + rest
request.headers.update({
'Upgrade': 'websocket',
'Connection': 'Upgrade',
'Sec-WebSocket-Key': self.key,
'Sec-WebSocket-Version': '13',
})
if self.compression_options is not None:
# Always offer to let the server set our max_wbits (and even though
# we don't offer it, we will accept a client_no_context_takeover
# from the server).
# TODO: set server parameters for deflate extension
# if requested in self.compression_options.
request.headers['Sec-WebSocket-Extensions'] = (
'permessage-deflate; client_max_window_bits')
self.tcp_client = TCPClient(io_loop=io_loop)
super(WebSocketClientConnection, self).__init__(
io_loop, None, request, lambda: None, self._on_http_response,
104857600, self.tcp_client, 65536, 104857600)
def close(self, code=None, reason=None):
"""Closes the websocket connection.
``code`` and ``reason`` are documented under
`WebSocketHandler.close`.
.. versionadded:: 3.2
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.protocol is not None:
self.protocol.close(code, reason)
self.protocol = None
def on_connection_close(self):
if not self.connect_future.done():
self.connect_future.set_exception(StreamClosedError())
self.on_message(None)
self.tcp_client.close()
super(WebSocketClientConnection, self).on_connection_close()
def _on_http_response(self, response):
if not self.connect_future.done():
if response.error:
self.connect_future.set_exception(response.error)
else:
self.connect_future.set_exception(WebSocketError(
"Non-websocket response"))
def headers_received(self, start_line, headers):
if start_line.code != 101:
return super(WebSocketClientConnection, self).headers_received(
start_line, headers)
self.headers = headers
self.protocol = self.get_websocket_protocol()
self.protocol._process_server_headers(self.key, self.headers)
self.protocol.start_pinging()
self.protocol._receive_frame()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
self.stream = self.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
# Once we've taken over the connection, clear the final callback
# we set on the http request. This deactivates the error handling
# in simple_httpclient that would otherwise interfere with our
# ability to see exceptions.
self.final_callback = None
self.connect_future.set_result(self)
def write_message(self, message, binary=False):
"""Sends a message to the WebSocket server."""
return self.protocol.write_message(message, binary)
def read_message(self, callback=None):
"""Reads a message from the WebSocket server.
If on_message_callback was specified at WebSocket
initialization, this function will never return messages
Returns a future whose result is the message, or None
if the connection is closed. If a callback argument
is given it will be called with the future when it is
ready.
"""
assert self.read_future is None
future = TracebackFuture()
if self.read_queue:
future.set_result(self.read_queue.popleft())
else:
self.read_future = future
if callback is not None:
self.io_loop.add_future(future, callback)
return future
def on_message(self, message):
if self._on_message_callback:
self._on_message_callback(message)
elif self.read_future is not None:
self.read_future.set_result(message)
self.read_future = None
else:
self.read_queue.append(message)
def on_pong(self, data):
pass
def on_ping(self, data):
pass
def get_websocket_protocol(self):
return WebSocketProtocol13(self, mask_outgoing=True,
compression_options=self.compression_options)
def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
on_message_callback=None, compression_options=None,
ping_interval=None, ping_timeout=None,
max_message_size=None):
"""Client-side websocket support.
Takes a url and returns a Future whose result is a
`WebSocketClientConnection`.
``compression_options`` is interpreted in the same way as the
return value of `.WebSocketHandler.get_compression_options`.
The connection supports two styles of operation. In the coroutine
style, the application typically calls
`~.WebSocketClientConnection.read_message` in a loop::
conn = yield websocket_connect(url)
while True:
msg = yield conn.read_message()
if msg is None: break
# Do something with msg
In the callback style, pass an ``on_message_callback`` to
``websocket_connect``. In both styles, a message of ``None``
indicates that the connection has been closed.
.. versionchanged:: 3.2
Also accepts ``HTTPRequest`` objects in place of urls.
.. versionchanged:: 4.1
Added ``compression_options`` and ``on_message_callback``.
The ``io_loop`` argument is deprecated.
.. versionchanged:: 4.5
Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
arguments, which have the same meaning as in `WebSocketHandler`.
"""
if io_loop is None:
io_loop = IOLoop.current()
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
# Copy and convert the headers dict/object (see comments in
# AsyncHTTPClient.fetch)
request.headers = httputil.HTTPHeaders(request.headers)
else:
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = httpclient._RequestProxy(
request, httpclient.HTTPRequest._DEFAULTS)
conn = WebSocketClientConnection(io_loop, request,
on_message_callback=on_message_callback,
compression_options=compression_options,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
max_message_size=max_message_size)
if callback is not None:
io_loop.add_future(conn.connect_future, callback)
return conn.connect_future
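# A minimal callback-style usage sketch (hypothetical URL and handler
# names; the coroutine style is shown in the docstring above):
#
#   def on_msg(msg):
#       if msg is None:
#           print("connection closed")  # None signals a closed connection
#       else:
#           print("received:", msg)
#
#   websocket_connect("ws://example.com/ws", on_message_callback=on_msg)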
|
saltstack/salt
|
salt/ext/tornado/websocket.py
|
Python
|
apache-2.0
| 48,071
|
[
"VisIt"
] |
82b9b00d35c4a3ea0260487a7caa591f29f2f6ae40b843cc35fc00aff34e5013
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Run the bowtie2 command, skipping the manual naming of intermediate output
files. Bowtie2 help:
<http://bowtie-bio.sourceforge.net/bowtie2/index.shtml>
"""
import sys
import logging
from jcvi.formats.base import BaseFile
from jcvi.utils.cbook import percentage
from jcvi.formats.sam import output_bam, get_prefix, get_samfile
from jcvi.apps.base import OptionParser, ActionDispatcher, need_update, sh, get_abs_path
first_tag = lambda fp: next(fp).split()[0]
class BowtieLogFile(BaseFile):
"""
Simple file that contains mapping rate:
100000 reads; of these:
100000 (100.00%) were unpaired; of these:
88453 (88.45%) aligned 0 times
9772 (9.77%) aligned exactly 1 time
1775 (1.77%) aligned >1 times
11.55% overall alignment rate
"""
def __init__(self, filename):
super(BowtieLogFile, self).__init__(filename)
fp = open(filename)
self.total = int(first_tag(fp))
self.unpaired = int(first_tag(fp))
self.unmapped = int(first_tag(fp))
self.unique = int(first_tag(fp))
self.multiple = int(first_tag(fp))
self.mapped = self.unique + self.multiple
self.rate = float(first_tag(fp).rstrip("%"))
fp.close()
def __str__(self):
return "Total mapped: {0}".format(percentage(self.mapped, self.total))
__repr__ = __str__
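# Usage sketch (assuming "run.log" is a hypothetical bowtie2 log in the
# format shown in the class docstring):
#   log = BowtieLogFile("run.log")
#   log.mapped        # unique + multiple alignments
#   print(log)        # "Total mapped: <mapped> of <total> (<pct>)"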
def main():
actions = (
("index", "wraps bowtie2-build"),
("align", "wraps bowtie2"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def check_index(dbfile):
dbfile = get_abs_path(dbfile)
safile = dbfile + ".1.bt2"
if need_update(dbfile, safile):
cmd = "bowtie2-build {0} {0}".format(dbfile)
sh(cmd)
else:
logging.error("`{0}` exists. `bowtie2-build` already run.".format(safile))
return dbfile
def index(args):
"""
%prog index database.fasta
Wrapper for `bowtie2-build`. Same interface.
"""
p = OptionParser(index.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(dbfile,) = args
check_index(dbfile)
def align(args):
"""
%prog align database.fasta read1.fq [read2.fq]
Wrapper for `bowtie2` single-end or paired-end, depending on the number of args.
"""
from jcvi.formats.fastq import guessoffset
p = OptionParser(align.__doc__)
p.set_firstN(firstN=0)
p.add_option(
"--full",
default=False,
action="store_true",
help="Enforce end-to-end alignment [default: local]",
)
p.add_option(
"--reorder",
default=False,
action="store_true",
help="Keep the input read order",
)
p.add_option(
"--null",
default=False,
action="store_true",
help="Do not write to SAM/BAM output",
)
p.add_option(
"--fasta", default=False, action="store_true", help="Query reads are FASTA"
)
p.set_cutoff(cutoff=800)
p.set_mateorientation(mateorientation="+-")
p.set_sam_options(bowtie=True)
opts, args = p.parse_args(args)
extra = opts.extra
mo = opts.mateorientation
if mo == "+-":
extra += ""
elif mo == "-+":
extra += "--rf"
else:
extra += "--ff"
PE = True
if len(args) == 2:
logging.debug("Single-end alignment")
PE = False
elif len(args) == 3:
logging.debug("Paired-end alignment")
else:
sys.exit(not p.print_help())
firstN = opts.firstN
mapped = opts.mapped
unmapped = opts.unmapped
fasta = opts.fasta
gl = "--end-to-end" if opts.full else "--local"
dbfile, readfile = args[0:2]
dbfile = check_index(dbfile)
prefix = get_prefix(readfile, dbfile)
samfile, mapped, unmapped = get_samfile(
readfile, dbfile, bowtie=True, mapped=mapped, unmapped=unmapped, bam=opts.bam
)
logfile = prefix + ".log"
if not fasta:
offset = guessoffset([readfile])
if not need_update(dbfile, samfile):
logging.error("`{0}` exists. `bowtie2` already run.".format(samfile))
return samfile, logfile
cmd = "bowtie2 -x {0}".format(dbfile)
if PE:
r1, r2 = args[1:3]
cmd += " -1 {0} -2 {1}".format(r1, r2)
cmd += " --maxins {0}".format(opts.cutoff)
mtag, utag = "--al-conc", "--un-conc"
else:
cmd += " -U {0}".format(readfile)
mtag, utag = "--al", "--un"
if mapped:
cmd += " {0} {1}".format(mtag, mapped)
if unmapped:
cmd += " {0} {1}".format(utag, unmapped)
if firstN:
cmd += " --upto {0}".format(firstN)
cmd += " -p {0}".format(opts.cpus)
if fasta:
cmd += " -f"
else:
cmd += " --phred{0}".format(offset)
cmd += " {0}".format(gl)
if opts.reorder:
cmd += " --reorder"
cmd += " {0}".format(extra)
# Finally the log
cmd += " 2> {0}".format(logfile)
if opts.null:
samfile = "/dev/null"
cmd = output_bam(cmd, samfile)
sh(cmd)
print(open(logfile).read(), file=sys.stderr)
return samfile, logfile
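# For reference, a paired-end run such as
#   %prog align genome.fasta reads_1.fq reads_2.fq
# (hypothetical file names) assembles a command along the lines of:
#   bowtie2 -x genome.fasta -1 reads_1.fq -2 reads_2.fq --maxins 800 \
#       -p <cpus> --phred33 --local 2> <prefix>.log
# before output_bam() appends the SAM/BAM sink and sh() executes it.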
if __name__ == "__main__":
main()
|
tanghaibao/jcvi
|
jcvi/apps/bowtie.py
|
Python
|
bsd-2-clause
| 5,219
|
[
"Bowtie"
] |
8aa2fae7ef2346fce87dbe40026258d3eda1dee185b313ca2c8c3fee38bf4dd5
|
"""Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Licence: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal)
def f(x):
return x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2))
* RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2))
* RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
+ C(1e-5, (1e-5, 1e2)),
C(0.1, (1e-2, 1e2))
* RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
+ C(1e-5, (1e-5, 1e2))]
def test_gpr_interpolation():
"""Test the interpolating property for different kernels."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_true(np.allclose(y_pred, y))
assert_true(np.allclose(np.diag(y_cov), 0.))
def test_lml_improving():
""" Test that hyperparameter-tuning improves log-marginal likelihood. """
for kernel in kernels:
if kernel == fixed_kernel: continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
""" Test that lml of optimized kernel is stored correctly. """
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_equal(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood())
def test_converged_to_local_maximum():
""" Test that we are in local maximum after hyperparameter-optimization."""
for kernel in kernels:
if kernel == fixed_kernel: continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4)
| (gpr.kernel_.theta == gpr.kernel_.bounds[:, 0])
| (gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])))
def test_solution_inside_bounds():
""" Test that hyperparameter-optimization remains in bounds"""
for kernel in kernels:
if kernel == fixed_kernel: continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
bounds = gpr.kernel_.bounds
max_ = np.finfo(gpr.kernel_.theta.dtype).max
tiny = 1e-10
bounds[~np.isfinite(bounds[:, 1]), 1] = max_
assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
def test_lml_gradient():
""" Compare analytic and numeric gradient of log marginal likelihood. """
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpr.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_prior():
""" Test that GP prior has mean 0 and identical variances."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel)
y_mean, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_mean, 0, 5)
if len(gpr.kernel.theta) > 1:
# XXX: quite hacky, works only for current kernels
assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
else:
assert_almost_equal(np.diag(y_cov), 1, 5)
def test_sample_statistics():
""" Test that statistics of samples drawn from GP are correct."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
samples = gpr.sample_y(X2, 1000000)
# More digits accuracy would require many more samples
assert_almost_equal(y_mean, np.mean(samples, 1), 2)
assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
np.var(samples, 1) / np.diag(y_cov).max(), 1)
def test_no_optimizer():
""" Test that kernel parameters are unmodified when optimizer is None."""
kernel = RBF(1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
assert_equal(np.exp(gpr.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
""" Test that predicted std.-dev. is consistent with cov's diagonal."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
y_mean, y_std = gpr.predict(X2, return_std=True)
assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
def test_anisotropic_kernel():
""" Test that GPR can identify meaningful anisotropic length-scales. """
# We learn a function which varies in one dimension ten-times slower
# than in the other. The corresponding length-scales should differ by at
# least a factor 5
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (50, 2))
y = X[:, 0] + 0.1 * X[:, 1]
kernel = RBF([1.0, 1.0])
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(np.exp(gpr.kernel_.theta[1]),
np.exp(gpr.kernel_.theta[0]) * 5)
def test_random_starts():
"""
Test that an increasing number of random-starts of GP fitting only
increases the log marginal likelihood of the chosen theta.
"""
n_samples, n_features = 25, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
+ rng.normal(scale=0.1, size=n_samples)
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1.0] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
last_lml = -np.inf
for n_restarts_optimizer in range(9):
gp = GaussianProcessRegressor(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0,).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_y_normalization():
""" Test normalization of the target values in GP
Fitting non-normalizing GP on normalized y and fitting normalizing GP
on unnormalized y should yield identical results
"""
y_mean = y.mean(0)
y_norm = y - y_mean
for kernel in kernels:
# Fit non-normalizing GP on normalized y
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X, y_norm)
# Fit normalizing GP on unnormalized y
gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_norm.fit(X, y)
# Compare predicted mean, std-devs and covariances
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
y_pred = y_mean + y_pred
y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
assert_almost_equal(y_pred, y_pred_norm)
assert_almost_equal(y_pred_std, y_pred_std_norm)
_, y_cov = gpr.predict(X2, return_cov=True)
_, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
assert_almost_equal(y_cov, y_cov_norm)
def test_y_multioutput():
""" Test that GPR can deal with multi-dimensional target values"""
y_2d = np.vstack((y, y*2)).T
# Test for fixed kernel that first dimension of 2d GP equals the output
# of 1d GP and that second dimension is twice as large
kernel = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr_2d.fit(X, y_2d)
y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
_, y_cov_1d = gpr.predict(X2, return_cov=True)
_, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
# Standard deviation and covariance do not depend on output
assert_almost_equal(y_std_1d, y_std_2d)
assert_almost_equal(y_cov_1d, y_cov_2d)
y_sample_1d = gpr.sample_y(X2, n_samples=10)
y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
# Test hyperparameter optimization
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_2d.fit(X, np.vstack((y, y)).T)
assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
def test_custom_optimizer():
""" Test that GPR can use externally defined optimizers. """
# Define a dummy optimizer that simply tests 1000 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(1000):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel: continue
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
gpr.fit(X, y)
# Checks that optimizer improved marginal likelihood
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_duplicate_input():
""" Test GPR can handle two different output-values for the same input. """
for kernel in kernels:
gpr_equal_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
gpr_similar_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
X_ = np.vstack((X, X[0]))
y_ = np.hstack((y, y[0] + 1))
gpr_equal_inputs.fit(X_, y_)
X_ = np.vstack((X, X[0] + 1e-15))
y_ = np.hstack((y, y[0] + 1))
gpr_similar_inputs.fit(X_, y_)
X_test = np.linspace(0, 10, 100)[:, None]
y_pred_equal, y_std_equal = \
gpr_equal_inputs.predict(X_test, return_std=True)
y_pred_similar, y_std_similar = \
gpr_similar_inputs.predict(X_test, return_std=True)
assert_almost_equal(y_pred_equal, y_pred_similar)
assert_almost_equal(y_std_equal, y_std_similar)
|
kashif/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
Python
|
bsd-3-clause
| 11,870
|
[
"Gaussian"
] |
f2c0c5c251cf4cba689abf5e8f8e5aae32e86c0e44cd5abe9b53a108c5f0f2ec
|
#!/usr/bin/python
import os
import glob
from os import system
from shutil import copyfile
import re
import string
import sys
system('clear') # Clear the screen and start fresh
rs=system('make makestr.x') # need the exit status here...
if rs!=0: sys.exit("\n\nCompilation of makestr.x failed\n")
#tests=glob.glob('tests/struct_enum.in.*')
#f=open('tests/list')
system('rm vasp.0007')
system('./makestr.x ./tests/13struct.out 7')
rs=system('diff -q vasp.0007 tests/vasp.0007_from2x3_Rods_case')
if rs!=0:
sys.exit("\n --- Failure in the first of Rod's 2x3 cases ---\n")
else:
print "\n <<< First test of makestr.x passed >>>\n"
#for itest in tests:
# # Define a regexp for catching the test #
# r1 = re.compile('.*struct_enum\.in\.(...)')
# m = r1.match(itest) # Make the match
# Nt = m.group(1) # Pull out the group from the match --- the Test Number
# print "Starting Test "+Nt+"..."
# print string.rstrip(f.readline()) # Print corresponding line from "testlist"
#
# copyfile('tests/struct_enum.in.'+Nt,'struct_enum.in')
# system('./multienum.x')
# # Check for difference between this run and saved output
# rs=system('diff -q struct_enum.out tests/struct_enum.out.'+Nt)
#
# if rs!=0:
# #system('diff struct_enum.out tests/struct_enum.out.'+Nt)
# #raw_input()
# sys.exit("\n\nFailure in Test #:"+Nt)
# #copyfile('test.out','tests/'+Nt+'.out')
# print "\n\nFailure in Test #:"+Nt
# else: print "Test",Nt,"Passed"
# #system('rm str?.txt')
#system('rm test.out '+Nt+'.out')
#print "\n <<< All tests passed >>>\n"
|
msg-byu/enumlib
|
support/newtests.py
|
Python
|
mit
| 1,579
|
[
"VASP"
] |
9bea60c13b2a9163768ab00241e959134ea98b3d6c12af4706a1f10a375d3f9c
|
# Name: WL_Script.py
#
# Weak-Lensing "Study of Systematics and Classification of Compact Objects" Program I
#
# Type: python script
#
# Description: Central script that develops the whole process of reading images, filtering into galaxies and stars, correcting sizes and shapes, correcting PSF anisotropies, and re-classifying compact objects into galaxies to obtain a final catalogue
#
# Returns: FITS image - mass-density map
# Catalogs
# Plots
# FITS image - trial from Source Extractor
#
__author__ = "Guadalupe Canas Herrera"
__copyright__ = "Copyright (C) 2015 G. Canas Herrera"
__license__ = "Public Domain"
__version__ = "4.0.0"
__maintainer__ = "Guadalupe Canas"
__email__ = "gch24@alumnos.unican.es"
# Improvements: more automatic ---> only needs the name of the picture, the catalogue (in case you have it) and the BAND you want to analyze
# Old CatalogPlotter3.py has been split in two: WL_Script.py and WL_Utils.py
# Also call: WL_utils.py, WL_filter_mag_gal.py - WL_ellip_fitter.py (written by Guadalupe Canas Herrera)
# Also call 2: Source Extractor (by Emmanuel Bertin V2.3.2), sex2fiat (by DAVID WITTMAN v1.2), fiatfilter (by DAVID WITTMAN v1.2), ellipto (by DAVID WITTMAN v1.2), dlscombine (by DAVID WITTMAN v1.2 and modified by GUADALUPE CANAS)
#
# DLSCOMBINE CORRECTS PSF: it has a dependence in fiat.c, fiat.h, dlscombine_utils.c, dlscombine.c, dlscombine.h
# Guadalupe Canas Herrera modified fiat.c, dlscombine_utils.c, dlscombine.h
#
import matplotlib.pyplot as plt #Works for making python behave like matlab
#import sextutils as sex #Program used to read the original catalog
import numpy as np #Maths arrays and more
import numpy.ma as ma #Masking arrays
import sys #Strings inputs
import math #mathematical functions
import subprocess #calling to the terminal
from astropy.modeling import models, fitting #Package for fitting Legendre Polynomials
import warnings #Advices
from mpl_toolkits.mplot3d import Axes3D #Plotting in 3D
import WL_ellip_fitter as ellip_fit #Ellipticity fitting
from WL_Utils import sex_caller, sex_caller_corrected, ellipto_caller, dlscombine_pol_caller, dlscombine_leg_caller, ds9_caller, plotter, ellipticity, specfile, stars_maker, galaxies_maker, specfile_r, specfile_z
from WL_filter_mag_gal import filter_mag #Filtering final catalog of galaxies a function of magnitudes and call fiatmap
import seaborn as sns
import matplotlib.pylab as P #histograms
from Class_CrossMatching import CrossMatching
from Class_CatalogReader import CatalogReader
############################### BEGIN SCRIPT ###############################
# (1): We define the ending of the input/output files
type_fits = ".fits"
type_cat = ".cat"
type_fcat = ".fcat"
type_good = "_good.fcat"
type_galaxies = "_galaxies.fcat"
type_stars = "_stars.fcat"
type_ellipto_galaxies = "_ellipto_galaxies.fcat"
type_ellipto_stars = "_ellipto_stars.fcat"
type_shapes_galaxies = "_shapes_galaxies.fcat"
type_shapes_stars = "_shapes_stars.fcat"
type_match = "_match.fcat"
def main():
sns.set(style="white", palette="muted", color_codes=True)
print("Welcome to the Weak-Lensing Script, here to help you analizing Subaru images in search of galaxy clusters")
print("")
array_file_name = []
# (1): Ask how many images enter the cross-matching process.
question = int(raw_input("Please, tell me how many pictures enter the cross-matching: "))
cont = 0
BEFORE_NAME = ''
FILE_NAME = ''
#print FILE_NAME
FILE_NAME_CORRECTED= ''
while cont < question:
# (2): We need to read the image and band. We ask in screen the image of the region of the sky.
filter = raw_input("Introduce the name of the filter: ")
fits = raw_input("Please, introduce the name of the fits image you want to read or directly the catalogue: ")
#Save the name of the .fits and .cat in a string:
BEFORE_NAME = fits.find('.')
FILE_NAME = fits[:BEFORE_NAME]
#print FILE_NAME
FILE_NAME_CORRECTED='{}_corrected'.format(FILE_NAME)
if fits.endswith(type_fits):
#(3) STEP: Call Source Extractor
print("Let me call Source Extractor (called sex by friends). It will obtain the celestial objects. When it finishes I will show you the trial image")
print("")
catalog_name = sex_caller(fits, FILE_NAME)
#Show results of trial.fits
#subprocess.call('./ds9 {}_trial.fits'.format(FILE_NAME), shell=True)
#(4): Transform Source Extractor catalog into FIAT FORMAT
print("I'm transforming the catalog into a FIAT 1.0 format")
print("")
catalog_name_fiat= '{}.fcat'.format(FILE_NAME)
transform_into_fiat='perl sex2fiat.pl {}>{}'.format(catalog_name, catalog_name_fiat)
subprocess.call(transform_into_fiat, shell=True)
if fits.endswith(type_fcat):
catalog_name_fiat = fits
fits = raw_input("Please, introduce the name of the fits image: ")
#(5): Read the FIAT Catalog
FWHM_max_stars=0
names = ["number", "flux_iso", "fluxerr_iso", "mag_iso", "magger_iso", "mag_aper_1", "magerr_aper_1", "mag", "magger", "flux_max", "isoarea", "x", "y", "ra", "dec", "ixx", "iyy", "ixy", "ixxWIN", "iyyWIN", "ixyWIN", "A", "B", "theta", "enlogation", "ellipticity", "FWHM", "flags", "class_star"]
fcat = np.genfromtxt(catalog_name_fiat, names=names)
P.figure()
P.hist(fcat['class_star'], 50, normed=1, histtype='stepfilled')
P.show()
#Let's fix the ellipticity + and - for all celestial objects
#(6): plot FWHM vs mag_iso
print("I'm ploting MAG_ISO vs. FWHM")
magnitude1='mag_iso'
magnitude2='FWHM'
plotter(fcat, magnitude1, magnitude2, 2, '$mag(iso)$', '$FWHM/pixels$')
plt.show()
print("Do you want to fix axis limits? Please answer with y or n")
answer=raw_input()
if answer== "y":
xmin=float(raw_input("X min: "))
xmax=float(raw_input("X max: "))
ymin=float(raw_input("Y min: "))
ymax=float(raw_input("Y max: "))
#Fix limits
plotter(catalog_name, magnitude1, magnitude2, 3)
plt.xlim(xmin,xmax)
plt.ylim(ymin,ymax)
plt.show(block=False)
elif answer == "n":
plt.show(block=False)
else:
plt.show(block=False)
# (7): Obtaining a GOOD CATALOG without blank spaces and filter saturate objects
print("This catalog is not the good one. I'll show you why")
print("")
magnitude_x="x"
magnitude_y="y"
plotter(fcat, magnitude_x, magnitude_y, 4, '$x/pixels$', '$y/pixels$')
plt.show(block=False)
print("Please, introduce the values you prefer to bound x and y")
xmin_good=float(raw_input("X min: "))
xmax_good=float(raw_input("X max: "))
ymin_good=float(raw_input("Y min: "))
ymax_good=float(raw_input("Y max: "))
catalog_name_good= '{}{}'.format(FILE_NAME, type_good)
terminal_good= 'perl fiatfilter.pl "x>{} && x<{} && y>{} && y<{} && FLUX_ISO<3000000" {}>{}'.format(xmin_good, xmax_good, ymin_good, ymax_good, catalog_name_fiat, catalog_name_good)
subprocess.call(terminal_good, shell=True)
print("Wait a moment, I'm showing you the results in a sec")
fcat_good = np.genfromtxt(catalog_name_good, names=names)
print np.amax(fcat_good['flux_iso'])
plotter(fcat_good, 'x', 'y', 5, '$x/pixels$', '$y/pixels$')
plt.show(block=False)
ellipticity(fcat_good, 1)
plt.show(block=False)
plotter(fcat_good, magnitude1, magnitude2, 2, '$mag(iso)$', '$FWHM/pixels$')
plt.show(block=False)
#(8.1.): Creating STARS CATALOG
print("Let's obtain only a FIAT catalog that contains stars. We need to bound. Have a look to the FWHM vs Mag_ISO plot")
mag_iso_min_stars=float(raw_input("Enter the minimum value for mag_iso: "))
mag_iso_max_stars=float(raw_input("Enter the maximum value for mag_iso: "))
FWHM_min_stars=float(raw_input("Enter the minimum value for FWHM: "))
FWHM_max_stars=float(raw_input("Enter the maximum value for FWHM: "))
catalog_name_stars= '{}{}'.format(FILE_NAME, type_stars)
# Build the command string to run in the terminal
terminal_stars= 'perl fiatfilter.pl "MAG_ISO>{} && MAG_ISO<{} && FWHM>{} && FWHM<{} && CLASS_STAR>0.9 && FLUX_ISO<3000000" {}>{}'.format(mag_iso_min_stars, mag_iso_max_stars, FWHM_min_stars, FWHM_max_stars, catalog_name_good, catalog_name_stars)
subprocess.call(terminal_stars, shell=True)
fcat_stars=np.genfromtxt(catalog_name_stars, names=names)
ellipticity(fcat_stars, 6)
plt.show(block=False)
#(8.2.): Checking STARS CATALOG with Source Extractor Neural Network Output
P.figure()
P.hist(fcat_stars['class_star'], 50, normed=1, histtype='stepfilled')
P.show(block=False)
#(9.1.): Creating GALAXIES CATALOG
print("Let's obtain only a FIAT catalog that contains galaxies. We need to bound. Have a look to the FWHM vs Mag_ISO plot")
print("")
print("First, I'm going to perform a linear fit. Tell me the values of mag_iso")
mag_iso_min_galaxies=float(raw_input("Enter the minimum value for mag_iso: "))
mag_iso_max_galaxies=float(raw_input("Enter the maximum value for mag_iso: "))
catalog_name_fit='{}_fit{}'.format(FILE_NAME, type_galaxies)
# Build the command string to run in the terminal
terminal_fit= 'perl fiatfilter.pl -v "MAG_ISO>{} && MAG_ISO<{}" {}>{}'.format(mag_iso_min_galaxies, mag_iso_max_galaxies, catalog_name_good, catalog_name_fit)
subprocess.call(terminal_fit, shell=True)
fcat_fit = np.genfromtxt(catalog_name_fit, names=names)
fit=np.polyfit(fcat_fit['mag_iso'], fcat_fit['FWHM'], 1)
#Save in variables the values of the fitting
m=fit[0]
n=fit[1]
print 'The value of the y-intercept n={} and the value of the slope m={}'.format(n,m)
# Once you have the values of the fitting we can obtain the catalog of galaxies
catalog_name_galaxies= '{}{}'.format(FILE_NAME, type_galaxies)
#terminal_galaxies= 'perl fiatfilter.pl -v "FWHM>{}*MAG_ISO+{} && FWHM>{} && CLASS_STAR<0.1 && FLUX_ISO<3000000" {}>{}'.format(m, n, FWHM_max_stars, catalog_name_good, catalog_name_galaxies)
terminal_galaxies= 'perl fiatfilter.pl -v "FWHM>{}*MAG_ISO+{} && FWHM>{} && FLUX_ISO<3000000" {}>{}'.format(m, n, FWHM_max_stars, catalog_name_good, catalog_name_galaxies)
subprocess.call(terminal_galaxies, shell=True)
fcat_galaxies=np.genfromtxt(catalog_name_galaxies, names=names)
#subprocess.call('./fiatreview {} {}'.format(fits, catalog_name_galaxies), shell=True)
magnitude1='mag_iso'
magnitude2='FWHM'
plotter(fcat_good, magnitude1, magnitude2, 2, '$mag(iso)$', '$FWHM/pixels$')
mag_th= np.linspace(1, 30, 1000)
p = np.poly1d(fit)
plt.plot(mag_th, p(mag_th), 'b-')
plt.show()
ellipticity(fcat_galaxies, 9)
plt.show()
#(9.2.): Checking GALAXIES CATALOG with Source Extractor Neural Network Output
P.figure()
P.hist(fcat_galaxies['class_star'], 50, normed=1, histtype='stepfilled')
P.show(block=False)
# (***) CHECKING FOR STARS // GALAXIES DIVISION
weights_stars=np.ones_like(fcat_stars['class_star'])/len(fcat_stars['class_star'])
weights_galaxies=np.ones_like(fcat_galaxies['class_star'])/len(fcat_galaxies['class_star'])
weights_all = np.ones_like(fcat_good['class_star'])/len(fcat_good['class_star'])
plt.figure()
plt.hist(fcat_stars['class_star'], weights = weights_stars, bins= 5, histtype='stepfilled', label ='stars')
plt.hist(fcat_galaxies['class_star'], weights = weights_galaxies, bins= 5, histtype='stepfilled', label ='galaxies')
plt.legend(loc='upper right')
plt.xlabel('$class_{star}$', labelpad=20, fontsize=20)
plt.ylabel('$Frequency$', fontsize=20)
plt.ylim(0,0.6)
plt.show()
plt.hist(fcat_good['class_star'], color= 'r', weights = weights_all, bins=50, histtype='stepfilled', label ='all')
plt.legend(loc='upper right')
plt.xlabel('$class_{star}$', labelpad=20, fontsize=20)
plt.ylabel('$Frequency$', fontsize=20)
plt.ylim(0,0.6)
plt.show()
plt.show()
#(10): Calling Ellipto to recalculate shapes and ellipticities: ELLIPTO CATALOG
print("")
print("Now it is necessary to call ellipto in order to obtain in a proper way sizes and shapes both for galaxies and stars")
print("")
print("STARS")
print("")
catalog_name_ellipto_stars='{}{}'.format(FILE_NAME, type_ellipto_stars)
ellipto_caller(catalog_name_stars, fits, catalog_name_ellipto_stars)
print("GALAXIES")
catalog_name_ellipto_galaxies='{}{}'.format(FILE_NAME, type_ellipto_galaxies)
ellipto_caller(catalog_name_galaxies, fits, catalog_name_ellipto_galaxies)
print("DONE")
print("")
#(11): Now we clasify the catalogs obtained with ellipto forcing fiat filter: SHAPES CATALOG
print("Filtering good obtained celestial object from ellipto using fiatfilter...")
print("")
print("STARS")
catalog_name_shapes_stars='{}{}'.format(FILE_NAME, type_shapes_stars)
fiatfilter_errcode_stars='perl fiatfilter.pl -v "errcode<2" {}>{}'.format(catalog_name_ellipto_stars, catalog_name_shapes_stars)
subprocess.call(fiatfilter_errcode_stars, shell=True)
print("")
print("GALAXIES")
print("")
catalog_name_shapes_galaxies='{}{}'.format(FILE_NAME, type_shapes_galaxies)
fiatfilter_errcode_galaxies='perl fiatfilter.pl -v "errcode<2" {}>{}'.format(catalog_name_ellipto_galaxies, catalog_name_shapes_galaxies)
subprocess.call(fiatfilter_errcode_galaxies, shell=True)
print("DONE")
print("")
#(12): Recalculating ellipticities for stars
print("I'm recalculating ellipticities of the new star set after being corrected by ellipto")
names_ellipto = ["x", "y", "mag_iso", "median", "ixx", "iyy", "ixy", "a_input", "b_input", "theta", "ellipticity", "errcode", "sigsky", "size", "flux", "mean_rho_4th", "sigma_e", "wander"]
fiat_shapes_stars= np.genfromtxt(catalog_name_shapes_stars, names=names_ellipto)
ellipticity(fiat_shapes_stars, 15)
plt.show()
print "Show ellipticy as a function of x and y"
plotter(fiat_shapes_stars, 'x', 'ellipticity', 2, '$x/pixels$', '$\epsilon$')
plt.show()
plotter(fiat_shapes_stars, 'y', 'ellipticity', 2, '$y/pixels$', '$\epsilon$')
plt.show()
fiat_shapes_galaxies= np.genfromtxt(catalog_name_shapes_galaxies, names=names_ellipto)
ellipticity(fiat_shapes_galaxies, 15)
plt.show(block=False)
#(13): STARS--> you obtain two fitting both for ellip_1 and ellip_2
print("")
print("I'm performing a fitting of those ellipticities e_1 and e_2: both a simple 2D polynomial fitting and a 2D Legendre Polynomial fitting")
print("")
dlscombine_file_pol=''
dlscombine_file_leg=''
#Let's call the function fit_Polynomial from ellip_fitting3.py
fitting_file_ellip_pol=ellip_fit.fit_Polynomial(FILE_NAME, fiat_shapes_stars)
#Create file read by dlscombine
dlscombine_file_pol=specfile(fits, fitting_file_ellip_pol, FILE_NAME)
print("")
#Let's call the function fit_Legendre from ellip_fitting3.py
fitting_file_ellip_leg=ellip_fit.fit_Legendre(FILE_NAME, fiat_shapes_stars)
#Create file read by dlscombine
if filter=='r':
dlscombine_file_leg=specfile_r(fits, fitting_file_ellip_leg, FILE_NAME)
if filter=='z':
dlscombine_file_leg=specfile_z(fits, fitting_file_ellip_leg, FILE_NAME)
#(14): Let's call DLSCOMBINE to correct PSF anisotropies
print("I'm correcting PSF anisotropies using dlscombine: BOTH FOR POL AND LEG FITTING")
print("")
fits_pol='{}_corrected_pol.fits'.format(FILE_NAME)
dlscombine_call_pol='./dlscombine_pol {} {}'.format(dlscombine_file_pol, fits_pol)
subprocess.call(dlscombine_call_pol, shell=True)
fits_leg='{}_corrected_leg.fits'.format(FILE_NAME)
dlscombine_call_leg='./dlscombine_leg {} {}'.format(dlscombine_file_leg, fits_leg)
subprocess.call(dlscombine_call_leg, shell=True)
#(15): Call again Source Extractor only for the Legendre Polynomial fitting
print("I'm calling again SExtractor to obtain a new catalog from the corrected picture (only from the leg fitting)")
print("")
catalog_name_corrected=sex_caller_corrected(fits_leg, FILE_NAME)
#(16): Transform .cat into .fcat (FIAT) for the corrected catalog
catalog_name_fiat_corrected='{}_corrected.fcat'.format(FILE_NAME)
transform_into_fiat_corrected='perl sex2fiat.pl {}>{}'.format(catalog_name_corrected, catalog_name_fiat_corrected)
subprocess.call(transform_into_fiat_corrected, shell=True)
print("")
array_file_name.append(catalog_name_fiat_corrected)
cont = cont + 1
NAME_1= array_file_name[0]
NAME_2= array_file_name[1]
BEFORE_NAME_1 = NAME_1.find('.')
FILE_NAME_1 = NAME_1[:BEFORE_NAME_1]
BEFORE_NAME_2 = NAME_2.find('.')
FILE_NAME_2 = NAME_2[:BEFORE_NAME_2]
#CROSS-MATCHING
catag_r = CatalogReader(array_file_name[0])
catag_r.read()
catag_z = CatalogReader(array_file_name[1])
catag_z.read()
crossmatching = CrossMatching(catag_r.fcat, catag_z.fcat)
crossmatching.kdtree(n=1*1e-06)
crossmatching.catalog_writter('2CM_{}'.format(FILE_NAME_1), compare = '1to2')
print '\n'
crossmatching.catalog_writter('2CM_{}'.format(FILE_NAME_2), compare = '2to1')
FILE_NAME_FINAL = raw_input("Please, tell me the FINAL name: ")
if crossmatching.cont1to2<crossmatching.cont2to1:
catag_final_1 = CatalogReader('2CM_{}{}'.format(FILE_NAME_1, type_fcat))
catag_final_1.read()
catag_final_2 = CatalogReader('2CM_{}{}'.format(FILE_NAME_2, type_fcat))
catag_final_2.read()
crossmatching_final = CrossMatching(catag_final_1.fcat, catag_final_2.fcat)
crossmatching_final.kdtree(n=1*1e-06)
crossmatching.catalog_writter('{}'.format(FILE_NAME_FINAL), compare = '1to2')
if crossmatching.cont1to2>crossmatching.cont2to1:
catag_final_1 = CatalogReader('2CM_{}{}'.format(FILE_NAME_1, type_fcat))
catag_final_1.read()
catag_final_2 = CatalogReader('2CM_{}{}'.format(FILE_NAME_2, type_fcat))
catag_final_2.read()
crossmatching_final = CrossMatching(catag_final_1.fcat, catag_final_2.fcat)
crossmatching_final.kdtree(n=1*1e-06)
crossmatching.catalog_writter('{}'.format(FILE_NAME_FINAL), compare = '2to1')
if crossmatching.cont1to2==crossmatching.cont2to1:
catag_final_1 = CatalogReader('2CM_{}{}'.format(FILE_NAME_1, type_fcat))
catag_final_1.read()
catag_final_2 = CatalogReader('2CM_{}{}'.format(FILE_NAME_2, type_fcat))
catag_final_2.read()
crossmatching_final = CrossMatching(catag_final_1.fcat, catag_final_2.fcat)
crossmatching_final.kdtree(n=1*1e-06)
crossmatching.catalog_writter('{}'.format(FILE_NAME_FINAL), compare = '1to2')
catalog_name_fiat_corrected_final = '{}{}'.format(FILE_NAME_FINAL, type_fcat)
#(17): Transform again the corrected catalog into a GOOD catalog
catalog_name_corrected_good= '{}{}'.format(FILE_NAME_FINAL, type_good)
terminal_corrected_good= 'perl fiatfilter.pl "x>{} && x<{} && y>{} && y<{}" {}>{}'.format(xmin_good, xmax_good, ymin_good, ymax_good, catalog_name_fiat_corrected_final, catalog_name_corrected_good)
subprocess.call(terminal_corrected_good, shell=True)
FILE_NAME_CORRECTED='{}_corrected'.format(FILE_NAME_FINAL)
#(18): STARS CATALOG again...
print("Now we need to repeat the classification to obtain only galaxies and stars as we did before")
print("")
print("Let me show you again the FWHM vs MAG plot \n")
print("")
fcat_corrected=np.genfromtxt(catalog_name_corrected_good, names=names)
plotter(fcat_corrected, 'mag_iso', 'FWHM', 3, '$mag(iso)$', '$FWHM$')
plt.show(block=False)
print("First stars...")
print("")
catalog_name_fiat_corrected_stars=''
catalog_name_fiat_corrected_stars, FWHM_max_stars=stars_maker(catalog_name_corrected_good, FILE_NAME_CORRECTED)
fcat_stars_corrected=np.genfromtxt(catalog_name_fiat_corrected_stars, names=names)
ellipticity(fcat_stars_corrected, 20)
plt.show(block=False)
#(19): GALAXIES CATALOG again...
print("")
print("Second galaxies...")
print("")
catalog_name_fiat_corrected_galaxies=galaxies_maker(catalog_name_corrected_good, FILE_NAME_CORRECTED, FWHM_max_stars)
fcat_galaxies_corrected=np.genfromtxt(catalog_name_fiat_corrected_galaxies, names=names)
# (***) CHECKING FOR STARS // GALAXIES DIVISION
weights_stars=np.ones_like(fcat_stars_corrected['class_star'])/len(fcat_stars_corrected['class_star'])
weights_galaxies=np.ones_like(fcat_galaxies_corrected['class_star'])/len(fcat_galaxies_corrected['class_star'])
weights_all = np.ones_like(fcat_corrected['class_star'])/len(fcat_corrected['class_star'])
plt.figure()
plt.hist(fcat_stars_corrected['class_star'], weights = weights_stars, bins= 10, histtype='stepfilled', label ='stars')
plt.hist(fcat_galaxies_corrected['class_star'], weights = weights_galaxies, bins= 15, histtype='stepfilled', label ='galaxies')
plt.legend(loc='upper right')
plt.xlabel('$class_{star}$', labelpad=20, fontsize=20)
plt.ylabel('$Frequency$', fontsize=20)
plt.show()
plt.hist(fcat_corrected['class_star'], color= 'r', weights = weights_all, bins=50, histtype='stepfilled', label ='all')
plt.legend(loc='upper right')
plt.xlabel('$class_{star}$', labelpad=20, fontsize=20)
plt.ylabel('$Frequency$', fontsize=20)
plt.show()
#(20): ELLIPTO CATALOG and SHAPES CATALOG (only galaxies) again...
catalog_name_ellipto_stars_corrected='{}{}'.format(FILE_NAME_CORRECTED, type_ellipto_stars)
ellipto_caller(catalog_name_fiat_corrected_stars, fits, catalog_name_ellipto_stars_corrected)
catalog_name_ellipto_galaxies_corrected='{}{}'.format(FILE_NAME_CORRECTED, type_ellipto_galaxies)
ellipto_caller(catalog_name_fiat_corrected_galaxies, fits, catalog_name_ellipto_galaxies_corrected)
catalog_name_shapes_galaxies_corrected='{}{}'.format(FILE_NAME_CORRECTED, type_shapes_galaxies)
fiatfilter_errcode_galaxies_corrected='perl fiatfilter.pl -v "errcode<2" {}>{}'.format(catalog_name_ellipto_galaxies_corrected, catalog_name_shapes_galaxies_corrected)
subprocess.call(fiatfilter_errcode_galaxies_corrected, shell=True)
catalog_name_shapes_stars_corrected='{}{}'.format(FILE_NAME_CORRECTED, type_shapes_stars)
fiatfilter_errcode_stars_corrected='perl fiatfilter.pl -v "errcode<2" {}>{}'.format(catalog_name_ellipto_stars_corrected, catalog_name_shapes_stars_corrected)
subprocess.call(fiatfilter_errcode_stars_corrected, shell=True)
if __name__ == "__main__":
main()
|
gcanasherrera/Weak-Lensing
|
WL_Script.py
|
Python
|
gpl-3.0
| 23,947
|
[
"Galaxy"
] |
4ee4f6d5136f952ad573e6113eaafe46ab494c4f58de3c60eebba43d4b20e3fc
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a self-generating script that contains all of the iso3166-1 data.
To regenerate, a CSV file must be created that contains the latest data. Here's
how to do that:
1. Visit https://www.iso.org/obp
2. Click the "Country Codes" radio option and click the search button
3. Filter by "Officially assigned codes"
4. Change the results per page to 300
5. Copy the html table and paste into Libreoffice Calc / Excel
6. Delete the French name column
7. Save as a CSV file in django_countries/iso3166-1.csv
8. Run this script from the command line
"""
from __future__ import unicode_literals
import glob
import os
try:
from django.utils.translation import ugettext_lazy as _
except ImportError: # pragma: no cover
# Allows this module to be executed without Django installed.
_ = lambda x: x
COMMON_NAMES = {
"BN": _("Brunei"),
"BO": _("Bolivia"),
"GB": _("United Kingdom"),
"IR": _("Iran"),
"KP": _("North Korea"),
"KR": _("South Korea"),
"LA": _("Laos"),
"MD": _("Moldova"),
"MK": _("Macedonia"),
"RU": _("Russia"),
"SY": _("Syria"),
"TW": _("Taiwan"),
"TZ": _("Tanzania"),
"VE": _("Venezuela"),
"VN": _("Vietnam"),
}
# Nicely titled (and translatable) country names.
COUNTRIES = {
"AF": _("Afghanistan"),
"AX": _("Åland Islands"),
"AL": _("Albania"),
"DZ": _("Algeria"),
"AS": _("American Samoa"),
"AD": _("Andorra"),
"AO": _("Angola"),
"AI": _("Anguilla"),
"AQ": _("Antarctica"),
"AG": _("Antigua and Barbuda"),
"AR": _("Argentina"),
"AM": _("Armenia"),
"AW": _("Aruba"),
"AU": _("Australia"),
"AT": _("Austria"),
"AZ": _("Azerbaijan"),
"BS": _("Bahamas"),
"BH": _("Bahrain"),
"BD": _("Bangladesh"),
"BB": _("Barbados"),
"BY": _("Belarus"),
"BE": _("Belgium"),
"BZ": _("Belize"),
"BJ": _("Benin"),
"BM": _("Bermuda"),
"BT": _("Bhutan"),
"BO": _("Bolivia (Plurinational State of)"),
"BQ": _("Bonaire, Sint Eustatius and Saba"),
"BA": _("Bosnia and Herzegovina"),
"BW": _("Botswana"),
"BV": _("Bouvet Island"),
"BR": _("Brazil"),
"IO": _("British Indian Ocean Territory"),
"BN": _("Brunei Darussalam"),
"BG": _("Bulgaria"),
"BF": _("Burkina Faso"),
"BI": _("Burundi"),
"CV": _("Cabo Verde"),
"KH": _("Cambodia"),
"CM": _("Cameroon"),
"CA": _("Canada"),
"KY": _("Cayman Islands"),
"CF": _("Central African Republic"),
"TD": _("Chad"),
"CL": _("Chile"),
"CN": _("China"),
"CX": _("Christmas Island"),
"CC": _("Cocos (Keeling) Islands"),
"CO": _("Colombia"),
"KM": _("Comoros"),
"CD": _("Congo (the Democratic Republic of the)"),
"CG": _("Congo"),
"CK": _("Cook Islands"),
"CR": _("Costa Rica"),
"CI": _("Côte d'Ivoire"),
"HR": _("Croatia"),
"CU": _("Cuba"),
"CW": _("Curaçao"),
"CY": _("Cyprus"),
"CZ": _("Czech Republic"),
"DK": _("Denmark"),
"DJ": _("Djibouti"),
"DM": _("Dominica"),
"DO": _("Dominican Republic"),
"EC": _("Ecuador"),
"EG": _("Egypt"),
"SV": _("El Salvador"),
"GQ": _("Equatorial Guinea"),
"ER": _("Eritrea"),
"EE": _("Estonia"),
"ET": _("Ethiopia"),
"FK": _("Falkland Islands [Malvinas]"),
"FO": _("Faroe Islands"),
"FJ": _("Fiji"),
"FI": _("Finland"),
"FR": _("France"),
"GF": _("French Guiana"),
"PF": _("French Polynesia"),
"TF": _("French Southern Territories"),
"GA": _("Gabon"),
"GM": _("Gambia"),
"GE": _("Georgia"),
"DE": _("Germany"),
"GH": _("Ghana"),
"GI": _("Gibraltar"),
"GR": _("Greece"),
"GL": _("Greenland"),
"GD": _("Grenada"),
"GP": _("Guadeloupe"),
"GU": _("Guam"),
"GT": _("Guatemala"),
"GG": _("Guernsey"),
"GN": _("Guinea"),
"GW": _("Guinea-Bissau"),
"GY": _("Guyana"),
"HT": _("Haiti"),
"HM": _("Heard Island and McDonald Islands"),
"VA": _("Holy See"),
"HN": _("Honduras"),
"HK": _("Hong Kong"),
"HU": _("Hungary"),
"IS": _("Iceland"),
"IN": _("India"),
"ID": _("Indonesia"),
"IR": _("Iran (Islamic Republic of)"),
"IQ": _("Iraq"),
"IE": _("Ireland"),
"IM": _("Isle of Man"),
"IL": _("Israel"),
"IT": _("Italy"),
"JM": _("Jamaica"),
"JP": _("Japan"),
"JE": _("Jersey"),
"JO": _("Jordan"),
"KZ": _("Kazakhstan"),
"KE": _("Kenya"),
"KI": _("Kiribati"),
"KP": _("Korea (the Democratic People's Republic of)"),
"KR": _("Korea (the Republic of)"),
"KW": _("Kuwait"),
"KG": _("Kyrgyzstan"),
"LA": _("Lao People's Democratic Republic"),
"LV": _("Latvia"),
"LB": _("Lebanon"),
"LS": _("Lesotho"),
"LR": _("Liberia"),
"LY": _("Libya"),
"LI": _("Liechtenstein"),
"LT": _("Lithuania"),
"LU": _("Luxembourg"),
"MO": _("Macao"),
"MK": _("Macedonia (the former Yugoslav Republic of)"),
"MG": _("Madagascar"),
"MW": _("Malawi"),
"MY": _("Malaysia"),
"MV": _("Maldives"),
"ML": _("Mali"),
"MT": _("Malta"),
"MH": _("Marshall Islands"),
"MQ": _("Martinique"),
"MR": _("Mauritania"),
"MU": _("Mauritius"),
"YT": _("Mayotte"),
"MX": _("Mexico"),
"FM": _("Micronesia (Federated States of)"),
"MD": _("Moldova (the Republic of)"),
"MC": _("Monaco"),
"MN": _("Mongolia"),
"ME": _("Montenegro"),
"MS": _("Montserrat"),
"MA": _("Morocco"),
"MZ": _("Mozambique"),
"MM": _("Myanmar"),
"NA": _("Namibia"),
"NR": _("Nauru"),
"NP": _("Nepal"),
"NL": _("Netherlands"),
"NC": _("New Caledonia"),
"NZ": _("New Zealand"),
"NI": _("Nicaragua"),
"NE": _("Niger"),
"NG": _("Nigeria"),
"NU": _("Niue"),
"NF": _("Norfolk Island"),
"MP": _("Northern Mariana Islands"),
"NO": _("Norway"),
"OM": _("Oman"),
"PK": _("Pakistan"),
"PW": _("Palau"),
"PS": _("Palestine, State of"),
"PA": _("Panama"),
"PG": _("Papua New Guinea"),
"PY": _("Paraguay"),
"PE": _("Peru"),
"PH": _("Philippines"),
"PN": _("Pitcairn"),
"PL": _("Poland"),
"PT": _("Portugal"),
"PR": _("Puerto Rico"),
"QA": _("Qatar"),
"RE": _("Réunion"),
"RO": _("Romania"),
"RU": _("Russian Federation"),
"RW": _("Rwanda"),
"BL": _("Saint Barthélemy"),
"SH": _("Saint Helena, Ascension and Tristan da Cunha"),
"KN": _("Saint Kitts and Nevis"),
"LC": _("Saint Lucia"),
"MF": _("Saint Martin (French part)"),
"PM": _("Saint Pierre and Miquelon"),
"VC": _("Saint Vincent and the Grenadines"),
"WS": _("Samoa"),
"SM": _("San Marino"),
"ST": _("Sao Tome and Principe"),
"SA": _("Saudi Arabia"),
"SN": _("Senegal"),
"RS": _("Serbia"),
"SC": _("Seychelles"),
"SL": _("Sierra Leone"),
"SG": _("Singapore"),
"SX": _("Sint Maarten (Dutch part)"),
"SK": _("Slovakia"),
"SI": _("Slovenia"),
"SB": _("Solomon Islands"),
"SO": _("Somalia"),
"ZA": _("South Africa"),
"GS": _("South Georgia and the South Sandwich Islands"),
"SS": _("South Sudan"),
"ES": _("Spain"),
"LK": _("Sri Lanka"),
"SD": _("Sudan"),
"SR": _("Suriname"),
"SJ": _("Svalbard and Jan Mayen"),
"SZ": _("Swaziland"),
"SE": _("Sweden"),
"CH": _("Switzerland"),
"SY": _("Syrian Arab Republic"),
"TW": _("Taiwan (Province of China)"),
"TJ": _("Tajikistan"),
"TZ": _("Tanzania, United Republic of"),
"TH": _("Thailand"),
"TL": _("Timor-Leste"),
"TG": _("Togo"),
"TK": _("Tokelau"),
"TO": _("Tonga"),
"TT": _("Trinidad and Tobago"),
"TN": _("Tunisia"),
"TR": _("Turkey"),
"TM": _("Turkmenistan"),
"TC": _("Turks and Caicos Islands"),
"TV": _("Tuvalu"),
"UG": _("Uganda"),
"UA": _("Ukraine"),
"AE": _("United Arab Emirates"),
"GB": _("United Kingdom of Great Britain and Northern Ireland"),
"UM": _("United States Minor Outlying Islands"),
"US": _("United States of America"),
"UY": _("Uruguay"),
"UZ": _("Uzbekistan"),
"VU": _("Vanuatu"),
"VE": _("Venezuela (Bolivarian Republic of)"),
"VN": _("Viet Nam"),
"VG": _("Virgin Islands (British)"),
"VI": _("Virgin Islands (U.S.)"),
"WF": _("Wallis and Futuna"),
"EH": _("Western Sahara"),
"YE": _("Yemen"),
"ZM": _("Zambia"),
"ZW": _("Zimbabwe"),
}
ALT_CODES = {
"AF": ("AFG", 4),
"AX": ("ALA", 248),
"AL": ("ALB", 8),
"DZ": ("DZA", 12),
"AS": ("ASM", 16),
"AD": ("AND", 20),
"AO": ("AGO", 24),
"AI": ("AIA", 660),
"AQ": ("ATA", 10),
"AG": ("ATG", 28),
"AR": ("ARG", 32),
"AM": ("ARM", 51),
"AW": ("ABW", 533),
"AU": ("AUS", 36),
"AT": ("AUT", 40),
"AZ": ("AZE", 31),
"BS": ("BHS", 44),
"BH": ("BHR", 48),
"BD": ("BGD", 50),
"BB": ("BRB", 52),
"BY": ("BLR", 112),
"BE": ("BEL", 56),
"BZ": ("BLZ", 84),
"BJ": ("BEN", 204),
"BM": ("BMU", 60),
"BT": ("BTN", 64),
"BO": ("BOL", 68),
"BQ": ("BES", 535),
"BA": ("BIH", 70),
"BW": ("BWA", 72),
"BV": ("BVT", 74),
"BR": ("BRA", 76),
"IO": ("IOT", 86),
"BN": ("BRN", 96),
"BG": ("BGR", 100),
"BF": ("BFA", 854),
"BI": ("BDI", 108),
"CV": ("CPV", 132),
"KH": ("KHM", 116),
"CM": ("CMR", 120),
"CA": ("CAN", 124),
"KY": ("CYM", 136),
"CF": ("CAF", 140),
"TD": ("TCD", 148),
"CL": ("CHL", 152),
"CN": ("CHN", 156),
"CX": ("CXR", 162),
"CC": ("CCK", 166),
"CO": ("COL", 170),
"KM": ("COM", 174),
"CD": ("COD", 180),
"CG": ("COG", 178),
"CK": ("COK", 184),
"CR": ("CRI", 188),
"CI": ("CIV", 384),
"HR": ("HRV", 191),
"CU": ("CUB", 192),
"CW": ("CUW", 531),
"CY": ("CYP", 196),
"CZ": ("CZE", 203),
"DK": ("DNK", 208),
"DJ": ("DJI", 262),
"DM": ("DMA", 212),
"DO": ("DOM", 214),
"EC": ("ECU", 218),
"EG": ("EGY", 818),
"SV": ("SLV", 222),
"GQ": ("GNQ", 226),
"ER": ("ERI", 232),
"EE": ("EST", 233),
"ET": ("ETH", 231),
"FK": ("FLK", 238),
"FO": ("FRO", 234),
"FJ": ("FJI", 242),
"FI": ("FIN", 246),
"FR": ("FRA", 250),
"GF": ("GUF", 254),
"PF": ("PYF", 258),
"TF": ("ATF", 260),
"GA": ("GAB", 266),
"GM": ("GMB", 270),
"GE": ("GEO", 268),
"DE": ("DEU", 276),
"GH": ("GHA", 288),
"GI": ("GIB", 292),
"GR": ("GRC", 300),
"GL": ("GRL", 304),
"GD": ("GRD", 308),
"GP": ("GLP", 312),
"GU": ("GUM", 316),
"GT": ("GTM", 320),
"GG": ("GGY", 831),
"GN": ("GIN", 324),
"GW": ("GNB", 624),
"GY": ("GUY", 328),
"HT": ("HTI", 332),
"HM": ("HMD", 334),
"VA": ("VAT", 336),
"HN": ("HND", 340),
"HK": ("HKG", 344),
"HU": ("HUN", 348),
"IS": ("ISL", 352),
"IN": ("IND", 356),
"ID": ("IDN", 360),
"IR": ("IRN", 364),
"IQ": ("IRQ", 368),
"IE": ("IRL", 372),
"IM": ("IMN", 833),
"IL": ("ISR", 376),
"IT": ("ITA", 380),
"JM": ("JAM", 388),
"JP": ("JPN", 392),
"JE": ("JEY", 832),
"JO": ("JOR", 400),
"KZ": ("KAZ", 398),
"KE": ("KEN", 404),
"KI": ("KIR", 296),
"KP": ("PRK", 408),
"KR": ("KOR", 410),
"KW": ("KWT", 414),
"KG": ("KGZ", 417),
"LA": ("LAO", 418),
"LV": ("LVA", 428),
"LB": ("LBN", 422),
"LS": ("LSO", 426),
"LR": ("LBR", 430),
"LY": ("LBY", 434),
"LI": ("LIE", 438),
"LT": ("LTU", 440),
"LU": ("LUX", 442),
"MO": ("MAC", 446),
"MK": ("MKD", 807),
"MG": ("MDG", 450),
"MW": ("MWI", 454),
"MY": ("MYS", 458),
"MV": ("MDV", 462),
"ML": ("MLI", 466),
"MT": ("MLT", 470),
"MH": ("MHL", 584),
"MQ": ("MTQ", 474),
"MR": ("MRT", 478),
"MU": ("MUS", 480),
"YT": ("MYT", 175),
"MX": ("MEX", 484),
"FM": ("FSM", 583),
"MD": ("MDA", 498),
"MC": ("MCO", 492),
"MN": ("MNG", 496),
"ME": ("MNE", 499),
"MS": ("MSR", 500),
"MA": ("MAR", 504),
"MZ": ("MOZ", 508),
"MM": ("MMR", 104),
"NA": ("NAM", 516),
"NR": ("NRU", 520),
"NP": ("NPL", 524),
"NL": ("NLD", 528),
"NC": ("NCL", 540),
"NZ": ("NZL", 554),
"NI": ("NIC", 558),
"NE": ("NER", 562),
"NG": ("NGA", 566),
"NU": ("NIU", 570),
"NF": ("NFK", 574),
"MP": ("MNP", 580),
"NO": ("NOR", 578),
"OM": ("OMN", 512),
"PK": ("PAK", 586),
"PW": ("PLW", 585),
"PS": ("PSE", 275),
"PA": ("PAN", 591),
"PG": ("PNG", 598),
"PY": ("PRY", 600),
"PE": ("PER", 604),
"PH": ("PHL", 608),
"PN": ("PCN", 612),
"PL": ("POL", 616),
"PT": ("PRT", 620),
"PR": ("PRI", 630),
"QA": ("QAT", 634),
"RE": ("REU", 638),
"RO": ("ROU", 642),
"RU": ("RUS", 643),
"RW": ("RWA", 646),
"BL": ("BLM", 652),
"SH": ("SHN", 654),
"KN": ("KNA", 659),
"LC": ("LCA", 662),
"MF": ("MAF", 663),
"PM": ("SPM", 666),
"VC": ("VCT", 670),
"WS": ("WSM", 882),
"SM": ("SMR", 674),
"ST": ("STP", 678),
"SA": ("SAU", 682),
"SN": ("SEN", 686),
"RS": ("SRB", 688),
"SC": ("SYC", 690),
"SL": ("SLE", 694),
"SG": ("SGP", 702),
"SX": ("SXM", 534),
"SK": ("SVK", 703),
"SI": ("SVN", 705),
"SB": ("SLB", 90),
"SO": ("SOM", 706),
"ZA": ("ZAF", 710),
"GS": ("SGS", 239),
"SS": ("SSD", 728),
"ES": ("ESP", 724),
"LK": ("LKA", 144),
"SD": ("SDN", 729),
"SR": ("SUR", 740),
"SJ": ("SJM", 744),
"SZ": ("SWZ", 748),
"SE": ("SWE", 752),
"CH": ("CHE", 756),
"SY": ("SYR", 760),
"TW": ("TWN", 158),
"TJ": ("TJK", 762),
"TZ": ("TZA", 834),
"TH": ("THA", 764),
"TL": ("TLS", 626),
"TG": ("TGO", 768),
"TK": ("TKL", 772),
"TO": ("TON", 776),
"TT": ("TTO", 780),
"TN": ("TUN", 788),
"TR": ("TUR", 792),
"TM": ("TKM", 795),
"TC": ("TCA", 796),
"TV": ("TUV", 798),
"UG": ("UGA", 800),
"UA": ("UKR", 804),
"AE": ("ARE", 784),
"GB": ("GBR", 826),
"UM": ("UMI", 581),
"US": ("USA", 840),
"UY": ("URY", 858),
"UZ": ("UZB", 860),
"VU": ("VUT", 548),
"VE": ("VEN", 862),
"VN": ("VNM", 704),
"VG": ("VGB", 92),
"VI": ("VIR", 850),
"WF": ("WLF", 876),
"EH": ("ESH", 732),
"YE": ("YEM", 887),
"ZM": ("ZMB", 894),
"ZW": ("ZWE", 716),
}
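# Usage sketch (not part of the original module; assumes the dicts above):
# resolve the display name and the ISO 3166-1 alpha-3/numeric codes for a
# two-letter code, preferring the shorter common name when one exists.
def describe_country(code):
    """Return (name, alpha3, numeric) for an ISO alpha-2 code, if known."""
    name = COMMON_NAMES.get(code, COUNTRIES.get(code))
    alpha3, numeric = ALT_CODES.get(code, (None, None))
    return name, alpha3, numeric
# e.g. describe_country("GB") -> (_("United Kingdom"), "GBR", 826)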
def self_generate(
output_filename, filename='iso3166-1.csv'): # pragma: no cover
"""
The following code can be used for self-generation of this file.
It requires a UTF-8 CSV file containing the short ISO name and two letter
country code as the first two columns.
"""
import csv
import re
countries = []
alt_codes = []
with open(filename, 'rb') as csv_file:
for row in csv.reader(csv_file):
name = row[0].decode('utf-8').rstrip('*')
name = re.sub(r'\(the\)', '', name)
if name:
countries.append((name, row[1].decode('utf-8')))
alt_codes.append((
row[1].decode('utf-8'),
row[2].decode('utf-8'),
int(row[3]),
))
with open(__file__, 'r') as source_file:
contents = source_file.read()
# Write countries.
bits = re.match(
'(.*\nCOUNTRIES = \{\n)(.*?)(\n\}.*)', contents, re.DOTALL).groups()
country_list = []
for name, code in countries:
name = name.replace('"', r'\"').strip()
country_list.append(
' "{code}": _("{name}"),'.format(name=name, code=code))
content = bits[0]
content += '\n'.join(country_list).encode('utf-8')
# Write alt codes.
alt_bits = re.match(
'(.*\nALT_CODES = \{\n)(.*)(\n\}.*)', bits[2], re.DOTALL).groups()
alt_list = []
for code, code3, codenum in alt_codes:
alt_list.append(
' "{code}": ("{code3}", {codenum}),'.format(
code=code, code3=code3, codenum=codenum))
content += alt_bits[0]
content += '\n'.join(alt_list).encode('utf-8')
content += alt_bits[2]
# Generate file.
with open(output_filename, 'wb') as output_file:
output_file.write(content)
return countries
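# Hedged usage sketch (not executed here): regenerate this module from a CSV
# download of the ISO 3166-1 table. self_generate() rewrites the COUNTRIES
# and ALT_CODES blocks of this file into output_filename; 'data_new.py' below
# is a hypothetical target name.
#
#     countries = self_generate('data_new.py', filename='iso3166-1.csv')
#     print('Wrote {0} countries.'.format(len(countries)))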
def check_flags(verbosity=1):
files = {}
this_dir = os.path.dirname(__file__)
for path in glob.glob(os.path.join(this_dir, 'static', 'flags', '*.gif')):
files[os.path.basename(os.path.splitext(path)[0]).upper()] = path
flags_missing = set(COUNTRIES) - set(files)
if flags_missing: # pragma: no cover
print("The following country codes are missing a flag:")
for code in sorted(flags_missing):
print(" {0} ({1})".format(code, COUNTRIES[code]))
elif verbosity: # pragma: no cover
print("All country codes have flags. :)")
code_missing = set(files) - set(COUNTRIES)
# Special-case EU and __
for special_code in ('EU', '__'):
code_missing.discard(special_code)
if code_missing: # pragma: no cover
print("")
print("The following flags don't have a matching country code:")
for path in sorted(code_missing):
print(" {0}".format(path))
def check_common_names():
common_names_missing = set(COMMON_NAMES) - set(COUNTRIES)
if common_names_missing: # pragma: no cover
print("")
print(
"The following common names do not match an official country "
"code:")
for code in sorted(common_names_missing):
print(" {0}".format(code))
if __name__ == '__main__': # pragma: no cover
countries = self_generate(__file__)
print('Wrote {0} countries.'.format(len(countries)))
print("")
check_flags()
check_common_names()
|
Ali-aqrabawi/ezclinic
|
lib/django_countries/data.py
|
Python
|
mit
| 17,825
|
[
"BWA",
"VisIt"
] |
51d04f964fc4dabface476a2d4438500b5fa7cd8a487e26ebecaaa0e372bec62
|
""" Python test discovery, setup and run of test functions. """
import fnmatch
import functools
import inspect
import re
import types
import sys
import py
import pytest
from _pytest._code.code import TerminalRepr
from _pytest.mark import MarkDecorator, MarkerError
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
import _pytest
import _pytest._pluggy as pluggy
cutdir2 = py.path.local(_pytest.__file__).dirpath()
cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
NoneType = type(None)
NOTSET = object()
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(''))
_PY3 = sys.version_info > (3, 0)
_PY2 = not _PY3
if hasattr(inspect, 'signature'):
def _format_args(func):
return str(inspect.signature(func))
else:
def _format_args(func):
return inspect.formatargspec(*inspect.getargspec(func))
if sys.version_info[:2] == (2, 6):
def isclass(object):
""" Return true if the object is a class. Overrides inspect.isclass for
python 2.6 because it will return True for objects which always return
something on __getattr__ calls (see #1035).
Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
"""
return isinstance(object, (type, types.ClassType))
def _has_positional_arg(func):
return func.__code__.co_argcount
def filter_traceback(entry):
# entry.path might sometimes return a str object when the entry
# points to dynamically generated code
# see https://bitbucket.org/pytest-dev/py/issues/71
raw_filename = entry.frame.code.raw.co_filename
is_generated = '<' in raw_filename and '>' in raw_filename
if is_generated:
return False
    # entry.path might point to a nonexistent file, in which case it will
    # also return a str object. see #1133
p = py.path.local(entry.path)
return p != cutdir1 and not p.relto(cutdir2)
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
while hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
if isinstance(obj, functools.partial):
obj = obj.func
return obj
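# Illustration (hypothetical names, not part of pytest): get_real_func()
# peels functools.partial wrappers (and anything exposing __wrapped__) off
# to reach the underlying function object.
#
#     import functools
#     def base(x): return x
#     assert get_real_func(functools.partial(base, 1)) is base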
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, 'place_as'):
obj = obj.place_as
fslineno = _pytest._code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
try:
return func.im_func
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception.
Attribute access can potentially fail for 'evil' Python objects.
See issue214
"""
try:
return getattr(object, name, default)
except Exception:
return default
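# Illustration (hypothetical class, not part of pytest): safe_getattr()
# falls back to the default even when attribute access raises a
# non-AttributeError exception, which plain getattr() would propagate.
#
#     class Evil(object):
#         def __getattr__(self, name):
#             raise RuntimeError(name)
#     assert safe_getattr(Evil(), 'anything', 42) == 42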
class FixtureFunctionMarker:
def __init__(self, scope, params,
autouse=False, yieldctx=False, ids=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.yieldctx = yieldctx
self.ids = ids
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a fixture factory function.
    This decorator can be used (with or without parameters) to define
a fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module", "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids)
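# Hedged example of the decorator above as it would appear in user test code
# (hypothetical fixture and test names):
#
#     @pytest.fixture(scope="module", params=[1, 2])
#     def number(request):
#         return request.param
#
#     def test_number(number):
#         assert number in (1, 2)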
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a yield-fixture factory function
(EXPERIMENTAL).
This takes the same arguments as :py:func:`pytest.fixture` but
expects a fixture function to use a ``yield`` instead of a ``return``
statement to provide a fixture. See
http://pytest.org/en/latest/yieldfixture.html for more info.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, yieldctx=True)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse,
yieldctx=True, ids=ids)
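# Hedged example of a yield-fixture in user test code (hypothetical names):
# everything before the yield is setup, everything after it is teardown.
#
#     @pytest.yield_fixture
#     def tmp_handle():
#         handle = open(__file__)
#         yield handle
#         handle.close()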
defaultfuncargprefixmarker = fixture()
def pyobj_property(name):
def get(self):
node = self.getparent(getattr(pytest, name))
if node is not None:
return node.obj
doc = "python %s object this node was collected from (can be None)." % (
name.lower(),)
return property(get, None, None, doc)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--fixtures', '--funcargs',
action="store_true", dest="showfixtures", default=False,
help="show available fixtures, sorted by plugin appearance")
parser.addini("usefixtures", type="args", default=[],
help="list of default fixtures to be used with this project")
parser.addini("python_files", type="args",
default=['test_*.py', '*_test.py'],
help="glob-style file patterns for Python test module discovery")
parser.addini("python_classes", type="args", default=["Test",],
help="prefixes or glob names for Python test class discovery")
parser.addini("python_functions", type="args", default=["test",],
help="prefixes or glob names for Python test function and "
"method discovery")
group.addoption("--import-mode", default="prepend",
choices=["prepend", "append"], dest="importmode",
help="prepend/append to sys.path when importing test modules, "
"default is to prepend.")
def pytest_cmdline_main(config):
if config.option.showfixtures:
showfixtures(config)
return 0
def pytest_generate_tests(metafunc):
# those alternative spellings are common - raise a specific error to alert
# the user
alt_spellings = ['parameterize', 'parametrise', 'parameterise']
for attr in alt_spellings:
if hasattr(metafunc.function, attr):
msg = "{0} has '{1}', spelling should be 'parametrize'"
raise MarkerError(msg.format(metafunc.function.__name__, attr))
try:
markers = metafunc.function.parametrize
except AttributeError:
return
for marker in markers:
metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
config.addinivalue_line("markers",
"parametrize(argnames, argvalues): call a test function multiple "
"times passing in different arguments in turn. argvalues generally "
"needs to be a list of values if argnames specifies only one name "
"or a list of tuples of values if argnames specifies multiple names. "
"Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
"decorated test function, one with arg1=1 and another with arg1=2."
"see http://pytest.org/latest/parametrize.html for more info and "
"examples."
)
config.addinivalue_line("markers",
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
)
def pytest_sessionstart(session):
session._fixturemanager = FixtureManager(session)
@pytest.hookimpl(trylast=True)
def pytest_namespace():
raises.Exception = pytest.fail.Exception
return {
'fixture': fixture,
'yield_fixture': yield_fixture,
'raises' : raises,
'collect': {
'Module': Module, 'Class': Class, 'Instance': Instance,
'Function': Function, 'Generator': Generator,
'_fillfuncargs': fillfixtures}
}
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
@pytest.hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
testfunction = pyfuncitem.obj
if pyfuncitem._isyieldedfunction():
testfunction(*pyfuncitem._args)
else:
funcargs = pyfuncitem.funcargs
testargs = {}
for arg in pyfuncitem._fixtureinfo.argnames:
testargs[arg] = funcargs[arg]
testfunction(**testargs)
return True
def pytest_collect_file(path, parent):
ext = path.ext
if ext == ".py":
if not parent.session.isinitpath(path):
for pat in parent.config.getini('python_files'):
if path.fnmatch(pat):
break
else:
return
ihook = parent.session.gethookproxy(path)
return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
return Module(path, parent)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
outcome = yield
res = outcome.get_result()
if res is not None:
raise StopIteration
# nothing was collected elsewhere, let's do it here
if isclass(obj):
if collector.istestclass(obj, name):
Class = collector._getcustomclass("Class")
outcome.force_result(Class(name, parent=collector))
elif collector.istestfunction(obj, name):
# mock seems to store unbound methods (issue473), normalize it
obj = getattr(obj, "__func__", obj)
# We need to try and unwrap the function if it's a functools.partial
        # or a functools.wrapped.
        # We mustn't if it's been wrapped with mock.patch (python 2 only)
if not (isfunction(obj) or isfunction(get_real_func(obj))):
collector.warn(code="C2", message=
"cannot collect %r because it is not a function."
% name, )
elif getattr(obj, "__test__", True):
if is_generator(obj):
res = Generator(name, parent=collector)
else:
res = list(collector._genfunctions(name, obj))
outcome.force_result(res)
def is_generator(func):
try:
return _pytest._code.getrawcode(func).co_flags & 32 # generator function
except AttributeError: # builtin functions have no bytecode
# assume them to not be generators
return False
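# Quick illustration (hypothetical sample functions): flag 32 is
# inspect.CO_GENERATOR, so only generator functions report True.
#
#     def plain(): return None
#     def gen(): yield None
#     assert is_generator(gen) and not is_generator(plain)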
class PyobjContext(object):
module = pyobj_property("Module")
cls = pyobj_property("Class")
instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
def obj():
def fget(self):
try:
return self._obj
except AttributeError:
self._obj = obj = self._getobj()
return obj
def fset(self, value):
self._obj = value
return property(fget, fset, None, "underlying python object")
obj = obj()
def _getobj(self):
return getattr(self.parent.obj, self.name)
def getmodpath(self, stopatmodule=True, includemodule=False):
""" return python path relative to the containing module. """
chain = self.listchain()
chain.reverse()
parts = []
for node in chain:
if isinstance(node, Instance):
continue
name = node.name
if isinstance(node, Module):
assert name.endswith(".py")
name = name[:-3]
if stopatmodule:
if includemodule:
parts.append(name)
break
parts.append(name)
parts.reverse()
s = ".".join(parts)
return s.replace(".[", "[")
def _getfslineno(self):
return getfslineno(self.obj)
def reportinfo(self):
# XXX caching?
obj = self.obj
compat_co_firstlineno = getattr(obj, 'compat_co_firstlineno', None)
if isinstance(compat_co_firstlineno, int):
# nose compatibility
fspath = sys.modules[obj.__module__].__file__
if fspath.endswith(".pyc"):
fspath = fspath[:-1]
lineno = compat_co_firstlineno
else:
fspath, lineno = getfslineno(obj)
modpath = self.getmodpath()
assert isinstance(lineno, int)
return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
def funcnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_functions', name)
def isnosetest(self, obj):
""" Look for the __test__ attribute, which is applied by the
@nose.tools.istest decorator
"""
# We explicitly check for "is True" here to not mistakenly treat
# classes with a custom __getattr__ returning something truthy (like a
# function) as test classes.
return safe_getattr(obj, '__test__', False) is True
def classnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_classes', name)
def istestfunction(self, obj, name):
return (
(self.funcnamefilter(name) or self.isnosetest(obj)) and
safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
)
def istestclass(self, obj, name):
return self.classnamefilter(name) or self.isnosetest(obj)
def _matches_prefix_or_glob_option(self, option_name, name):
"""
checks if the given name matches the prefix or glob-pattern defined
in ini configuration.
"""
for option in self.config.getini(option_name):
if name.startswith(option):
return True
# check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
# and fnmatch is somewhat expensive to call
elif ('*' in option or '?' in option or '[' in option) and \
fnmatch.fnmatch(name, option):
return True
return False
def collect(self):
if not getattr(self.obj, "__test__", True):
return []
# NB. we avoid random getattrs and peek in the __dict__ instead
# (XXX originally introduced from a PyPy need, still true?)
dicts = [getattr(self.obj, '__dict__', {})]
for basecls in inspect.getmro(self.obj.__class__):
dicts.append(basecls.__dict__)
seen = {}
l = []
for dic in dicts:
for name, obj in list(dic.items()):
if name in seen:
continue
seen[name] = True
res = self.makeitem(name, obj)
if res is None:
continue
if not isinstance(res, list):
res = [res]
l.extend(res)
l.sort(key=lambda item: item.reportinfo()[:2])
return l
def makeitem(self, name, obj):
#assert self.ihook.fspath == self.fspath, self
return self.ihook.pytest_pycollect_makeitem(
collector=self, name=name, obj=obj)
def _genfunctions(self, name, funcobj):
module = self.getparent(Module).obj
clscol = self.getparent(Class)
cls = clscol and clscol.obj or None
transfer_markers(funcobj, cls, module)
fm = self.session._fixturemanager
fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
metafunc = Metafunc(funcobj, fixtureinfo, self.config,
cls=cls, module=module)
methods = []
if hasattr(module, "pytest_generate_tests"):
methods.append(module.pytest_generate_tests)
if hasattr(cls, "pytest_generate_tests"):
methods.append(cls().pytest_generate_tests)
if methods:
self.ihook.pytest_generate_tests.call_extra(methods,
dict(metafunc=metafunc))
else:
self.ihook.pytest_generate_tests(metafunc=metafunc)
Function = self._getcustomclass("Function")
if not metafunc._calls:
yield Function(name, parent=self, fixtureinfo=fixtureinfo)
else:
# add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
add_funcarg_pseudo_fixture_def(self, metafunc, fm)
for callspec in metafunc._calls:
subname = "%s[%s]" %(name, callspec.id)
yield Function(name=subname, parent=self,
callspec=callspec, callobj=funcobj,
fixtureinfo=fixtureinfo,
keywords={callspec.id:True})
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
    # this function will transform all collected calls to a function
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
def _marked(func, mark):
""" Returns True if :func: is already marked with :mark:, False otherwise.
This can happen if marker is applied to class and the test file is
invoked more than once.
"""
try:
func_mark = getattr(func, mark.name)
except AttributeError:
return False
return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
def transfer_markers(funcobj, cls, mod):
# XXX this should rather be code in the mark plugin or the mark
# plugin should merge with the python plugin.
for holder in (cls, mod):
try:
pytestmark = holder.pytestmark
except AttributeError:
continue
if isinstance(pytestmark, list):
for mark in pytestmark:
if not _marked(funcobj, mark):
mark(funcobj)
else:
if not _marked(funcobj, pytestmark):
pytestmark(funcobj)
class Module(pytest.File, PyCollector):
""" Collector for test classes and functions. """
def _getobj(self):
return self._memoizedcall('_obj', self._importtestmodule)
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Module, self).collect()
def _importtestmodule(self):
# we assume we are only called once per module
importmode = self.config.getoption("--import-mode")
try:
mod = self.fspath.pyimport(ensuresyspath=importmode)
except SyntaxError:
raise self.CollectError(
_pytest._code.ExceptionInfo().getrepr(style="short"))
except self.fspath.ImportMismatchError:
e = sys.exc_info()[1]
raise self.CollectError(
"import file mismatch:\n"
"imported module %r has this __file__ attribute:\n"
" %s\n"
"which is not the same as the test file we want to collect:\n"
" %s\n"
"HINT: remove __pycache__ / .pyc files and/or use a "
"unique basename for your test file modules"
% e.args
)
#print "imported test module", mod
self.config.pluginmanager.consider_module(mod)
return mod
def setup(self):
setup_module = xunitsetup(self.obj, "setUpModule")
if setup_module is None:
setup_module = xunitsetup(self.obj, "setup_module")
if setup_module is not None:
#XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if _has_positional_arg(setup_module):
setup_module(self.obj)
else:
setup_module()
fin = getattr(self.obj, 'tearDownModule', None)
if fin is None:
fin = getattr(self.obj, 'teardown_module', None)
if fin is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if _has_positional_arg(fin):
finalizer = lambda: fin(self.obj)
else:
finalizer = fin
self.addfinalizer(finalizer)
class Class(PyCollector):
""" Collector for test methods. """
def collect(self):
if hasinit(self.obj):
self.warn("C1", "cannot collect test class %r because it has a "
"__init__ constructor" % self.obj.__name__)
return []
return [self._getcustomclass("Instance")(name="()", parent=self)]
def setup(self):
setup_class = xunitsetup(self.obj, 'setup_class')
if setup_class is not None:
setup_class = getattr(setup_class, 'im_func', setup_class)
setup_class = getattr(setup_class, '__func__', setup_class)
setup_class(self.obj)
fin_class = getattr(self.obj, 'teardown_class', None)
if fin_class is not None:
fin_class = getattr(fin_class, 'im_func', fin_class)
fin_class = getattr(fin_class, '__func__', fin_class)
self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
def _getobj(self):
obj = self.parent.obj()
return obj
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Instance, self).collect()
def newinstance(self):
self.obj = self._getobj()
return self.obj
class FunctionMixin(PyobjMixin):
""" mixin for the code common to Function and Generator.
"""
def setup(self):
""" perform setup for this test function. """
if hasattr(self, '_preservedparent'):
obj = self._preservedparent
elif isinstance(self.parent, Instance):
obj = self.parent.newinstance()
self.obj = self._getobj()
else:
obj = self.parent.obj
if inspect.ismethod(self.obj):
setup_name = 'setup_method'
teardown_name = 'teardown_method'
else:
setup_name = 'setup_function'
teardown_name = 'teardown_function'
setup_func_or_method = xunitsetup(obj, setup_name)
if setup_func_or_method is not None:
setup_func_or_method(self.obj)
fin = getattr(obj, teardown_name, None)
if fin is not None:
self.addfinalizer(lambda: fin(self.obj))
def _prunetraceback(self, excinfo):
if hasattr(self, '_obj') and not self.config.option.fulltrace:
code = _pytest._code.Code(get_real_func(self.obj))
path, firstlineno = code.path, code.firstlineno
traceback = excinfo.traceback
ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
if ntraceback == traceback:
ntraceback = ntraceback.cut(path=path)
if ntraceback == traceback:
#ntraceback = ntraceback.cut(excludepath=cutdir2)
ntraceback = ntraceback.filter(filter_traceback)
if not ntraceback:
ntraceback = traceback
excinfo.traceback = ntraceback.filter()
# issue364: mark all but first and last frames to
# only show a single-line message for each frame
if self.config.option.tbstyle == "auto":
if len(excinfo.traceback) > 2:
for entry in excinfo.traceback[1:-1]:
entry.set_repr_style('short')
def _repr_failure_py(self, excinfo, style="long"):
if excinfo.errisinstance(pytest.fail.Exception):
if not excinfo.value.pytrace:
return py._builtin._totext(excinfo.value)
return super(FunctionMixin, self)._repr_failure_py(excinfo,
style=style)
def repr_failure(self, excinfo, outerr=None):
assert outerr is None, "XXX outerr usage is deprecated"
style = self.config.option.tbstyle
if style == "auto":
style = "long"
return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
def collect(self):
# test generators are seen as collectors but they also
# invoke setup/teardown on popular request
# (induced by the common "test_*" naming shared with normal tests)
self.session._setupstate.prepare(self)
# see FunctionMixin.setup and test_setupstate_is_preserved_134
self._preservedparent = self.parent.obj
l = []
seen = {}
for i, x in enumerate(self.obj()):
name, call, args = self.getcallargs(x)
if not callable(call):
raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
if name is None:
name = "[%d]" % i
else:
name = "['%s']" % name
if name in seen:
raise ValueError("%r generated tests with non-unique name %r" %(self, name))
seen[name] = True
l.append(self.Function(name, self, args=args, callobj=call))
return l
def getcallargs(self, obj):
if not isinstance(obj, (tuple, list)):
obj = (obj,)
        # explicit naming
if isinstance(obj[0], py.builtin._basestring):
name = obj[0]
obj = obj[1:]
else:
name = None
call, args = obj[0], obj[1:]
return name, call, args
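# Hedged example of the (deprecated) yield-based tests that the Generator
# collector above handles (hypothetical user test code); each yielded tuple
# is an optional name, a callable, and its arguments:
#
#     def check_even(n):
#         assert n % 2 == 0
#
#     def test_even_numbers():
#         for n in (2, 4, 6):
#             yield check_even, n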
def hasinit(obj):
init = getattr(obj, '__init__', None)
if init:
if init != object.__init__:
return True
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
_notexists = object()
class CallSpec2(object):
def __init__(self, metafunc):
self.metafunc = metafunc
self.funcargs = {}
self._idlist = []
self.params = {}
self._globalid = _notexists
self._globalid_args = set()
self._globalparam = _notexists
self._arg2scopenum = {} # used for sorting parametrized resources
self.keywords = {}
self.indices = {}
def copy(self, metafunc):
cs = CallSpec2(self.metafunc)
cs.funcargs.update(self.funcargs)
cs.params.update(self.params)
cs.keywords.update(self.keywords)
cs.indices.update(self.indices)
cs._arg2scopenum.update(self._arg2scopenum)
cs._idlist = list(self._idlist)
cs._globalid = self._globalid
cs._globalid_args = self._globalid_args
cs._globalparam = self._globalparam
return cs
def _checkargnotcontained(self, arg):
if arg in self.params or arg in self.funcargs:
raise ValueError("duplicate %r" %(arg,))
def getparam(self, name):
try:
return self.params[name]
except KeyError:
if self._globalparam is _notexists:
raise ValueError(name)
return self._globalparam
@property
def id(self):
return "-".join(map(str, filter(None, self._idlist)))
def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
param_index):
for arg,val in zip(argnames, valset):
self._checkargnotcontained(arg)
valtype_for_arg = valtypes[arg]
getattr(self, valtype_for_arg)[arg] = val
self.indices[arg] = param_index
self._arg2scopenum[arg] = scopenum
if val is _notexists:
self._emptyparamspecified = True
self._idlist.append(id)
self.keywords.update(keywords)
def setall(self, funcargs, id, param):
for x in funcargs:
self._checkargnotcontained(x)
self.funcargs.update(funcargs)
if id is not _notexists:
self._idlist.append(id)
if param is not _notexists:
assert self._globalparam is _notexists
self._globalparam = param
for arg in funcargs:
self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
return self.fixturenames
class Metafunc(FuncargnamesCompatAttr):
"""
Metafunc objects are passed to the ``pytest_generate_tests`` hook.
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
:ivar fixturenames: set of fixture names required by the test function
:ivar function: underlying python test function
:ivar cls: class object where the test function is defined in or ``None``.
:ivar module: the module object where the test function is defined in.
:ivar config: access to the :class:`_pytest.config.Config` object for the
test session.
:ivar funcargnames:
.. deprecated:: 2.3
Use ``fixturenames`` instead.
"""
def __init__(self, function, fixtureinfo, config, cls=None, module=None):
self.config = config
self.module = module
self.function = function
self.fixturenames = fixtureinfo.names_closure
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
self.cls = cls
self._calls = []
self._ids = py.builtin.set()
def parametrize(self, argnames, argvalues, indirect=False, ids=None,
scope=None):
""" Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
during the collection phase. If you need to setup expensive resources
see about setting indirect to do it rather at test setup time.
:arg argnames: a comma-separated string denoting one or more argument
names, or a list/tuple of argument strings.
:arg argvalues: The list of argvalues determines how often a
test is invoked with different argument values. If only one
argname was specified argvalues is a list of values. If N
argnames were specified, argvalues must be a list of N-tuples,
where each tuple-element specifies a value for its respective
argname.
        :arg indirect: a list of argument names (a subset of argnames) or a
            boolean. If True, all argnames are treated as indirect. Each
            argvalue corresponding to an argname in this list will be passed
            as request.param to its respective fixture function so that it
            can perform more expensive setups during the setup phase of a
            test rather than at collection time.
:arg ids: list of string ids, or a callable.
            If strings, each corresponds to an argvalue so that it becomes
            part of the test id.
If callable, it should take one argument (a single argvalue) and return
a string or return None. If None, the automatically generated id for that
argument will be used.
If no ids are provided they will be generated automatically from
the argvalues.
:arg scope: if specified it denotes the scope of the parameters.
The scope is used for grouping tests by parameter instances.
It will also override any fixture-function defined scope, allowing
to set a dynamic scope using test context or configuration.
"""
# individual parametrized argument sets can be wrapped in a series
# of markers in which case we unwrap the values and apply the mark
# at Function init
newkeywords = {}
unwrapped_argvalues = []
for i, argval in enumerate(argvalues):
while isinstance(argval, MarkDecorator):
newmark = MarkDecorator(argval.markname,
argval.args[:-1], argval.kwargs)
newmarks = newkeywords.setdefault(i, {})
newmarks[newmark.markname] = newmark
argval = argval.args[-1]
unwrapped_argvalues.append(argval)
argvalues = unwrapped_argvalues
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if len(argnames) == 1:
argvalues = [(val,) for val in argvalues]
if not argvalues:
argvalues = [(_notexists,) * len(argnames)]
if scope is None:
scope = "function"
scopenum = scopes.index(scope)
valtypes = {}
for arg in argnames:
if arg not in self.fixturenames:
raise ValueError("%r uses no fixture %r" %(self.function, arg))
if indirect is True:
valtypes = dict.fromkeys(argnames, "params")
elif indirect is False:
valtypes = dict.fromkeys(argnames, "funcargs")
elif isinstance(indirect, (tuple, list)):
valtypes = dict.fromkeys(argnames, "funcargs")
for arg in indirect:
if arg not in argnames:
raise ValueError("indirect given to %r: fixture %r doesn't exist" %(
self.function, arg))
valtypes[arg] = "params"
idfn = None
if callable(ids):
idfn = ids
ids = None
if ids and len(ids) != len(argvalues):
raise ValueError('%d tests specified with %d ids' %(
len(argvalues), len(ids)))
if not ids:
ids = idmaker(argnames, argvalues, idfn)
newcalls = []
for callspec in self._calls or [CallSpec2(self)]:
for param_index, valset in enumerate(argvalues):
assert len(valset) == len(argnames)
newcallspec = callspec.copy(self)
newcallspec.setmulti(valtypes, argnames, valset, ids[param_index],
newkeywords.get(param_index, {}), scopenum,
param_index)
newcalls.append(newcallspec)
self._calls = newcalls
def addcall(self, funcargs=None, id=_notexists, param=_notexists):
""" (deprecated, use parametrize) Add a new call to the underlying
test function during the collection phase of a test run. Note that
        request.addcall() is called during the test collection phase prior to
        and independently of actual test execution. You should only use addcall()
if you need to specify multiple arguments of a test function.
:arg funcargs: argument keyword dictionary used when invoking
the test function.
:arg id: used for reporting and identification purposes. If you
don't supply an `id` an automatic unique id will be generated.
:arg param: a parameter which will be exposed to a later fixture function
invocation through the ``request.param`` attribute.
"""
assert funcargs is None or isinstance(funcargs, dict)
if funcargs is not None:
for name in funcargs:
if name not in self.fixturenames:
pytest.fail("funcarg %r not used in this function." % name)
else:
funcargs = {}
if id is None:
raise ValueError("id=None not allowed")
if id is _notexists:
id = len(self._calls)
id = str(id)
if id in self._ids:
raise ValueError("duplicate id %r" % id)
self._ids.add(id)
cs = CallSpec2(self)
cs.setall(funcargs, id, param)
self._calls.append(cs)
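# Hedged sketch of the hook that receives Metafunc objects (hypothetical
# conftest.py code): parametrize() adds one test invocation per value, and
# indirect=True routes each value through the fixture as request.param.
#
#     def pytest_generate_tests(metafunc):
#         if 'db_backend' in metafunc.fixturenames:
#             metafunc.parametrize('db_backend', ['sqlite', 'postgres'],
#                                  indirect=True)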
if _PY3:
import codecs
def _escape_bytes(val):
"""
If val is pure ascii, returns it as a str(), otherwise escapes
into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in the string, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string.
"""
if val:
# source: http://goo.gl/bGsnwC
encoded_bytes, _ = codecs.escape_encode(val)
return encoded_bytes.decode('ascii')
else:
# empty bytes crashes codecs.escape_encode (#1087)
return ''
else:
def _escape_bytes(val):
"""
In py2 bytes and str are the same type, so return it unchanged if it
is a full ascii string, otherwise escape it into its binary form.
"""
try:
return val.decode('ascii')
except UnicodeDecodeError:
return val.encode('string-escape')
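# Illustration of both branches above: pure-ASCII bytes come back as a plain
# str, anything else as backslash escapes.
#
#     assert _escape_bytes(b'abc') == 'abc'
#     assert _escape_bytes(b'\xc3\xb4') == '\\xc3\\xb4'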
def _idval(val, argname, idx, idfn):
if idfn:
try:
s = idfn(val)
if s:
return s
except Exception:
pass
if isinstance(val, bytes):
return _escape_bytes(val)
elif isinstance(val, (float, int, str, bool, NoneType)):
return str(val)
elif isinstance(val, REGEX_TYPE):
return _escape_bytes(val.pattern) if isinstance(val.pattern, bytes) else val.pattern
elif enum is not None and isinstance(val, enum.Enum):
return str(val)
elif isclass(val) and hasattr(val, '__name__'):
return val.__name__
elif _PY2 and isinstance(val, unicode):
# special case for python 2: if a unicode string is
        # convertible to ascii, return it as a str() object instead
try:
return str(val)
except UnicodeError:
# fallthrough
pass
return str(argname)+str(idx)
def _idvalset(idx, valset, argnames, idfn):
this_id = [_idval(val, argname, idx, idfn)
for val, argname in zip(valset, argnames)]
return "-".join(this_id)
def idmaker(argnames, argvalues, idfn=None):
ids = [_idvalset(valindex, valset, argnames, idfn)
for valindex, valset in enumerate(argvalues)]
if len(set(ids)) < len(ids):
# user may have provided a bad idfn which means the ids are not unique
ids = [str(i) + testid for i, testid in enumerate(ids)]
return ids
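# Illustration: one id per value set, joined with '-'; non-unique ids get an
# index prefix.
#
#     assert idmaker(('x', 'y'), [(1, 'a'), (2, 'b')]) == ['1-a', '2-b']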
def showfixtures(config):
from _pytest.main import wrap_session
return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
import _pytest.config
session.perform_collect()
curdir = py.path.local()
tw = _pytest.config.create_terminal_writer(config)
verbose = config.getvalue("verbose")
fm = session._fixturemanager
available = []
for argname, fixturedefs in fm._arg2fixturedefs.items():
assert fixturedefs is not None
if not fixturedefs:
continue
fixturedef = fixturedefs[-1]
loc = getlocation(fixturedef.func, curdir)
available.append((len(fixturedef.baseid),
fixturedef.func.__module__,
curdir.bestrelpath(loc),
fixturedef.argname, fixturedef))
available.sort()
currentmodule = None
for baseid, module, bestrel, argname, fixturedef in available:
if currentmodule != module:
if not module.startswith("_pytest."):
tw.line()
tw.sep("-", "fixtures defined from %s" %(module,))
currentmodule = module
if verbose <= 0 and argname[0] == "_":
continue
if verbose > 0:
funcargspec = "%s -- %s" %(argname, bestrel,)
else:
funcargspec = argname
tw.line(funcargspec, green=True)
loc = getlocation(fixturedef.func, curdir)
doc = fixturedef.func.__doc__ or ""
if doc:
for line in doc.strip().split("\n"):
tw.line(" " + line.strip())
else:
tw.line(" %s: no docstring available" %(loc,),
red=True)
def getlocation(function, curdir):
import inspect
fn = py.path.local(inspect.getfile(function))
lineno = py.builtin._getcode(function).co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" %(fn, lineno+1)
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
""" assert that a code block/function call raises ``expected_exception``
and raise a failure exception otherwise.
    This helper produces an ``ExceptionInfo()`` object (see below).
If using Python 2.5 or above, you may use this function as a
context manager::
>>> with raises(ZeroDivisionError):
... 1/0
.. note::
When using ``pytest.raises`` as a context manager, it's worthwhile to
note that normal context manager rules apply and that the exception
raised *must* be the final line in the scope of the context manager.
Lines of code after that, within the scope of the context manager will
not be executed. For example::
>>> with raises(OSError) as exc_info:
assert 1 == 1 # this will execute as expected
                raise OSError(errno.EEXIST, 'directory exists')
                assert exc_info.value.errno == errno.EEXIST  # this will not execute
Instead, the following approach must be taken (note the difference in
scope)::
>>> with raises(OSError) as exc_info:
assert 1 == 1 # this will execute as expected
                raise OSError(errno.EEXIST, 'directory exists')
                assert exc_info.value.errno == errno.EEXIST  # this will now execute
Or you can specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
A third possibility is to use a string to be executed::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
.. autoclass:: _pytest._code.ExceptionInfo
:members:
.. note::
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
if expected_exception is AssertionError:
        # we want to catch an AssertionError
# replace our subclass with the builtin one
# see https://github.com/pytest-dev/pytest/issues/176
from _pytest.assertion.util import BuiltinAssertionError \
as expected_exception
msg = ("exceptions must be old-style classes or"
" derived from BaseException, not %s")
if isinstance(expected_exception, tuple):
for exc in expected_exception:
if not isclass(exc):
raise TypeError(msg % type(exc))
elif not isclass(expected_exception):
raise TypeError(msg % type(expected_exception))
if not args:
return RaisesContext(expected_exception)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
#print "raises frame scope: %r" % frame.f_locals
try:
code = _pytest._code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn't f_globals == f_locals mean something special?
# this is destroyed here ...
except expected_exception:
return _pytest._code.ExceptionInfo()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except expected_exception:
return _pytest._code.ExceptionInfo()
pytest.fail("DID NOT RAISE {0}".format(expected_exception))
class RaisesContext(object):
def __init__(self, expected_exception):
self.expected_exception = expected_exception
self.excinfo = None
def __enter__(self):
self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
return self.excinfo
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
pytest.fail("DID NOT RAISE")
if sys.version_info < (2, 7):
# py26: on __exit__() exc_value often does not contain the
# exception value.
# http://bugs.python.org/issue7853
if not isinstance(tp[1], BaseException):
exc_type, value, traceback = tp
tp = exc_type, exc_type(value), traceback
self.excinfo.__init__(tp)
return issubclass(self.excinfo.type, self.expected_exception)
#
# the basic pytest Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
""" a Function Item is responsible for setting up and executing a
Python test function.
"""
_genid = None
def __init__(self, name, parent, args=None, config=None,
callspec=None, callobj=NOTSET, keywords=None, session=None,
fixtureinfo=None):
super(Function, self).__init__(name, parent, config=config,
session=session)
self._args = args
if callobj is not NOTSET:
self.obj = callobj
self.keywords.update(self.obj.__dict__)
if callspec:
self.callspec = callspec
self.keywords.update(callspec.keywords)
if keywords:
self.keywords.update(keywords)
if fixtureinfo is None:
fixtureinfo = self.session._fixturemanager.getfixtureinfo(
self.parent, self.obj, self.cls,
funcargs=not self._isyieldedfunction())
self._fixtureinfo = fixtureinfo
self.fixturenames = fixtureinfo.names_closure
self._initrequest()
def _initrequest(self):
self.funcargs = {}
if self._isyieldedfunction():
assert not hasattr(self, "callspec"), (
"yielded functions (deprecated) cannot have funcargs")
else:
if hasattr(self, "callspec"):
callspec = self.callspec
assert not callspec.funcargs
self._genid = callspec.id
if hasattr(callspec, "param"):
self.param = callspec.param
self._request = FixtureRequest(self)
@property
def function(self):
"underlying python 'function' object"
return getattr(self.obj, 'im_func', self.obj)
def _getobj(self):
name = self.name
i = name.find("[") # parametrization
if i != -1:
name = name[:i]
return getattr(self.parent.obj, name)
@property
def _pyfuncitem(self):
"(compatonly) for code expecting pytest-2.2 style request objects"
return self
def _isyieldedfunction(self):
return getattr(self, "_args", None) is not None
def runtest(self):
""" execute the underlying test function. """
self.ihook.pytest_pyfunc_call(pyfuncitem=self)
def setup(self):
        # check if parametrization happened with an empty list
try:
self.callspec._emptyparamspecified
except AttributeError:
pass
else:
fs, lineno = self._getfslineno()
pytest.skip("got empty parameter set, function %s at %s:%d" %(
self.function.__name__, fs, lineno))
super(Function, self).setup()
fillfixtures(self)
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
        #: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._funcargs = {}
self._fixturedefs = {}
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self.fixturenames = fixtureinfo.names_closure
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
            # we arrive here because of a dynamic call to
# getfuncargvalue(argname) usage which was naturally
# not known at parsing/collection time
fixturedefs = self._fixturemanager.getfixturedefs(
argname, self._pyfuncitem.parent.nodeid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(pytest.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(pytest.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
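    # Illustrative usage (a sketch, not from the original source): a fixture
    # can mark just the tests that request it, e.g.
    #     @pytest.fixture
    #     def flaky_resource(request):
    #         request.applymarker(pytest.mark.xfail(reason="resource is flaky"))
    #         return make_resource()   # make_resource is hypothetical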
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfuncargvalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
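    # Illustrative usage of the deprecated API above (a sketch; smtplib is
    # just an example resource and would need to be imported):
    #     @pytest.fixture
    #     def smtp(request):
    #         return request.cached_setup(
    #             setup=lambda: smtplib.SMTP("localhost"),
    #             teardown=lambda conn: conn.close(),
    #             scope="session")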
def getfuncargvalue(self, argname):
""" Dynamically retrieve a named fixture function argument.
As of pytest-2.3, it is easier and usually better to access other
fixture values by stating it as an input argument in the fixture
function. If you only can decide about using another fixture at test
setup time, you may use this function to retrieve it inside a fixture
function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
def _get_active_fixturedef(self, argname):
try:
return self._fixturedefs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
# remove indent to prevent the python3 exception
# from leaking into the call
result = self._getfuncargvalue(fixturedef)
self._funcargs[argname] = result
self._fixturedefs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
def _getfuncargvalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
# clear sys.exc_info before invoking the fixture (python bug?)
        # if it's not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
pytest.fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" %(
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = _format_args(factory)
lines.append("%s:%d: def %s%s" %(
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self.addfinalizer = fixturedef.addfinalizer
self._pyfuncitem = request._pyfuncitem
self._funcargs = request._funcargs
self._fixturedefs = request._fixturedefs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self.fixturenames = request.fixturenames
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
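# Illustrative note (not in the original source): scopes is ordered from
# broadest to narrowest, so scopes.index("session") == 0 and
# scopes.index("function") == 3. A mismatch means a broader-scoped request
# asks for a narrower-scoped fixture:
#     scopemismatch("session", "function")  # True  -> reported as an error
#     scopemismatch("function", "session")  # False -> always allowed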
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
            # the last fixture raised an error, let's present
# it at the requesting side
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except (IOError, IndexError):
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno+1))
else:
addline("file %s, line %s" % (fspath, lineno+1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
for name, fixturedef in fm._arg2fixturedefs.items():
parentid = self.request._pyfuncitem.parent.nodeid
faclist = list(fm._matchfactories(fixturedef, parentid))
if faclist:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" %(", ".join(available),)
msg += "\n use 'py.test --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
#tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
for line in self.errorstring.split("\n"):
tw.line(" " + line.strip(), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno+1))
class FixtureManager:
"""
    pytest fixture definitions and related information are stored and managed
    by this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to setup the initial fixtures,
    i.e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
Upon the test-setup phases all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
if cls is not None:
startindex = 1
else:
startindex = None
argnames = getfuncargnames(func, startindex)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i+1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures, starting with the given
        # fixturenames as the initial set. As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
argnames = func_params[0]
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if argname not in func_params and argname not in argnames:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
obj = getattr(holderobj, name, None)
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
if not callable(obj):
continue
marker = defaultfuncargprefixmarker
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
assert not name.startswith(self._argprefix)
fixturedef = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
yieldctx=marker.yieldctx,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixturedef.has_location:
faclist.append(fixturedef)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixturedef)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno+1)
source = _pytest._code.Source(fixturefunc)
pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
if yieldctx:
if not is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="yield_fixture requires yield statement in function")
iter = fixturefunc(**kwargs)
next = getattr(iter, "__next__", None)
if next is None:
next = getattr(iter, "next")
res = next()
def teardown():
try:
next()
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
if is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="pytest.fixture functions cannot use ``yield``. "
"Instead write and return an inner function/generator "
"and let the consumer call and iterate over it.")
res = fixturefunc(**kwargs)
return res
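# Illustrative yield-context fixture as handled by call_fixture_func above
# (a sketch, not from the original source; the file name is hypothetical):
#     @pytest.yield_fixture
#     def resource():
#         handle = open("data.txt")   # setup
#         yield handle                # value handed to the test
#         handle.close()              # teardown, run via the registered finalizer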
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
yieldctx, unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scopes.index(scope or "function")
self.params = params
startindex = unittest and 1 or None
self.argnames = getfuncargnames(func, startindex=startindex)
self.yieldctx = yieldctx
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
try:
while self._finalizer:
func = self._finalizer.pop()
func()
finally:
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
kwargs = {}
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixturedef.cached_result
request._check_scope(argname, request.scope, fixturedef.scope)
kwargs[argname] = result
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
fixturefunc = self.func
if self.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = self.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "self" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(self.func)
if fixturefunc != self.func:
fixturefunc = fixturefunc.__get__(request.instance)
try:
result = call_fixture_func(fixturefunc, request, kwargs,
self.yieldctx)
except Exception:
self.cached_result = (None, my_cache_key, sys.exc_info())
raise
self.cached_result = (result, my_cache_key, None)
return result
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
if mock is not None:
return len([p for p in patchings
if not p.attribute_name and p.new is mock.DEFAULT])
return len(patchings)
def getfuncargnames(function, startindex=None):
# XXX merge with main.py's varnames
#assert not isclass(function)
realfunction = function
while hasattr(realfunction, "__wrapped__"):
realfunction = realfunction.__wrapped__
if startindex is None:
startindex = inspect.ismethod(function) and 1 or 0
if realfunction != function:
startindex += num_mock_patch_args(function)
function = realfunction
if isinstance(function, functools.partial):
argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
partial = function
argnames = argnames[len(partial.args):]
if partial.keywords:
for kw in partial.keywords:
argnames.remove(kw)
else:
argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
defaults = getattr(function, 'func_defaults',
getattr(function, '__defaults__', None)) or ()
numdefaults = len(defaults)
if numdefaults:
return tuple(argnames[startindex:-numdefaults])
return tuple(argnames[startindex:])
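# Illustrative behaviour (not in the original source):
#     def f(a, b, c=1): pass
#     getfuncargnames(f)  ->  ("a", "b")   # args with defaults are dropped
#     class T:
#         def test_m(self, x): pass
#     getfuncargnames(T().test_m)  ->  ("x",)   # bound-method "self" is skipped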
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = set(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
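# Illustrative effect (not in the original source): with a session-scoped
# parametrized fixture s in {1, 2} and collected items [t1[1], t2[2], t3[1]],
# the reordering yields [t1[1], t3[1], t2[2]], so the s=1 setup is not torn
# down and re-created between t1 and t3.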
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
items_before, ignore, argkeys_cache,scopenum+1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
# requested scope and which we haven't seen yet. We slice the input
# items list into a list of items_nomatch, items_same and
# items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
argkeys = argkeys.difference(ignore)
if argkeys: # found a slicing key
slicing_argkey = argkeys.pop()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
        # cs.indices.items() yields argnames in arbitrary order, but
        # different functions (items) can change the order of
        # arguments anyway, so it doesn't matter much
for argname, param_index in cs.indices.items():
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
def xunitsetup(obj, name):
meth = getattr(obj, name, None)
if getfixturemarker(meth) is None:
return meth
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except KeyboardInterrupt:
raise
except Exception:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
scopename2class = {
'class': Class,
'module': Module,
'function': pytest.Item,
}
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
|
youtube/cobalt
|
third_party/web_platform_tests/tools/pytest/_pytest/python.py
|
Python
|
bsd-3-clause
| 89,406
|
[
"VisIt"
] |
7829475370dbb8910798605ae98c04b5b47b7ddf458f829aefcb3cb913380282
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to configure .deb packages.
(c) 2014, Brian Coca <briancoca+ansible@gmail.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: debconf
short_description: Configure a .deb package
description:
    - Configure a .deb package using debconf-set-selections, or just query
      existing selections.
version_added: "1.6"
notes:
- This module requires the command line debconf tools.
- A number of questions have to be answered (depending on the package).
Use 'debconf-show <package>' on any Debian or derivative with the package
installed to see questions/settings available.
- Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
requirements: [ debconf, debconf-utils ]
options:
name:
description:
- Name of package to configure.
required: true
default: null
aliases: ['pkg']
question:
description:
- A debconf configuration setting
required: false
default: null
aliases: ['setting', 'selection']
vtype:
description:
- The type of the value supplied
required: false
default: null
choices: [string, password, boolean, select, multiselect, note, error, title, text]
aliases: []
value:
description:
- Value to set the configuration to
required: false
default: null
aliases: ['answer']
unseen:
description:
- Do not set 'seen' flag when pre-seeding
required: false
default: False
aliases: []
author: Brian Coca
'''
EXAMPLES = '''
# Set default locale to fr_FR.UTF-8
debconf: name=locales question='locales/default_environment_locale' value=fr_FR.UTF-8 vtype='select'
# set to generate locales:
debconf: name=locales question='locales/locales_to_be_generated' value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8' vtype='multiselect'
# Accept oracle license
debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select'
# Specifying package you can register/return the list of questions and current values
debconf: name='tzdata'
'''
def get_selections(module, pkg):
cmd = [module.get_bin_path('debconf-show', True), pkg]
rc, out, err = module.run_command(' '.join(cmd))
if rc != 0:
module.fail_json(msg=err)
selections = {}
for line in out.splitlines():
(key, value) = line.split(':', 1)
selections[ key.strip('*').strip() ] = value.strip()
return selections
def set_selection(module, pkg, question, vtype, value, unseen):
setsel = module.get_bin_path('debconf-set-selections', True)
cmd = [setsel]
if unseen:
cmd.append('-u')
data = ' '.join([pkg, question, vtype, value])
return module.run_command(cmd, data=data)
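# Illustrative note (not from the original source): debconf-set-selections
# reads whitespace-separated lines of the form
#     <package> <question> <vtype> <value>
# so the data string built above might be, for example:
#     "locales locales/default_environment_locale select fr_FR.UTF-8"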
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, aliases=['pkg'], type='str'),
question = dict(required=False, aliases=['setting', 'selection'], type='str'),
vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text']),
value= dict(required=False, type='str'),
unseen = dict(required=False, type='bool'),
),
required_together = ( ['question','vtype', 'value'],),
supports_check_mode=True,
)
#TODO: enable passing array of options and/or debconf file from get-selections dump
pkg = module.params["name"]
question = module.params["question"]
vtype = module.params["vtype"]
value = module.params["value"]
unseen = module.params["unseen"]
prev = get_selections(module, pkg)
changed = False
msg = ""
if question is not None:
if vtype is None or value is None:
module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
        if question not in prev or prev[question] != value:
changed = True
if changed:
if not module.check_mode:
rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
if rc:
module.fail_json(msg=e)
curr = { question: value }
if question in prev:
prev = {question: prev[question]}
else:
prev[question] = ''
module.exit_json(changed=changed, msg=msg, current=curr, previous=prev)
module.exit_json(changed=changed, msg=msg, current=prev)
# import module snippets
from ansible.module_utils.basic import *
main()
|
smashwilson/ansible-modules-extras
|
system/debconf.py
|
Python
|
gpl-3.0
| 5,256
|
[
"Brian"
] |
5f476f2a08e03208635312146e19d3d6b63a6f12983cab40dc5a15c09a44f63c
|
"""
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
from yamtbx.dataproc.auto.command_line import multi_check_cell_consistency
from yamtbx.dataproc.auto.command_line.run_all_xds_simple import run_xds
from yamtbx.dataproc.xds.xds_ascii import XDS_ASCII
from yamtbx.dataproc.xds import files as xds_files
from yamtbx.dataproc.xds import correctlp
from yamtbx.dataproc.xds import modify_xdsinp, make_backup, revert_files
from yamtbx.dataproc.xds.xparm import XPARM
from yamtbx import util
import iotbx.phil
import libtbx.phil
from libtbx import easy_mp
from libtbx.utils import multi_out
from cctbx.crystal import reindex
from cctbx import crystal
from cctbx import sgtbx
from cctbx.array_family import flex
import os
import time
import sys
import shutil
import numpy
import traceback
import tempfile
import glob
from cStringIO import StringIO
master_params_str = """
xdsdir = None
.type = path
.multiple = true
.help = top directory containing xds results
workdir = None
.type = path
.help = ""
unit_cell = None
.type = floats(size=6)
space_group = None
.type = str
.help = Choose the space group (name or number).
group_choice = None
.type = int
.help = Choose the group (run once and choose).
cell_method = *reindex refine
.type = choice(multi=False)
.help = ""
nproc = 1
.type = int
prep_dials_files = True
.type = bool
copy_into_workdir = True
.type = bool
cell_grouping {
tol_length = None
.type = float
.help = relative_length_tolerance
tol_angle = None
.type = float
.help = absolute_angle_tolerance in degree
}
"""
def prepare_dials_files(wd, out, space_group=None, reindex_op=None, moveto=None):
try:
from yamtbx.dataproc.dials.command_line import import_xds_for_refine
files = import_xds_for_refine.run(xds_inp=os.path.join(wd, "XDS.INP"),
xparm=os.path.join(wd, "XPARM.XDS"),
integrate_lp=os.path.join(wd, "INTEGRATE.LP"),
integrate_hkl=os.path.join(wd, "INTEGRATE.HKL"),
spot_xds=os.path.join(wd, "SPOT.XDS"),
space_group=space_group, reindex_op=reindex_op,
out_dir=wd)
if moveto and wd!=moveto:
for f in files: shutil.move(f, moveto)
except:
print >>out, "Error in generation of dials files in %s" % wd
print >>out, traceback.format_exc()
# prepare_dials_files()
def rescale_with_specified_symm_worker(sym_wd_wdr, topdir, log_out, reference_symm, sgnum, sgnum_laue, prep_dials_files=False):
# XXX Unsafe if multiple processes run this function for the same target directory at the same time
sym, wd, wdr = sym_wd_wdr
out = StringIO()
print >>out, os.path.relpath(wd, topdir),
    # Find appropriate data # XXX doesn't work for DIALS data!!
xac_file = util.return_first_found_file(("XDS_ASCII.HKL_noscale.org", "XDS_ASCII.HKL_noscale",
"XDS_ASCII_fullres.HKL.org", "XDS_ASCII_fullres.HKL",
"XDS_ASCII.HKL.org", "XDS_ASCII.HKL"),
wd=wd)
if xac_file is None:
print >>out, "Can't find XDS_ASCII file in %s" % wd
log_out.write(out.getvalue())
log_out.flush()
return (wd, None)
xac = XDS_ASCII(xac_file, read_data=False)
print >>out, "%s %s (%s)" % (os.path.basename(xac_file), xac.symm.space_group_info(),
",".join(map(lambda x: "%.2f"%x, xac.symm.unit_cell().parameters())))
if xac.symm.reflection_intensity_symmetry(False).space_group_info().type().number() == sgnum_laue:
if xac.symm.unit_cell().is_similar_to(reference_symm.unit_cell(), 0.1, 10):
print >>out, " Already scaled with specified symmetry"
log_out.write(out.getvalue())
log_out.flush()
if wd != wdr: shutil.copy2(xac_file, wdr)
if prep_dials_files: prepare_dials_files(wd, out, moveto=wdr)
return (wdr, (numpy.array(xac.symm.unit_cell().parameters()),
os.path.join(wdr, os.path.basename(xac_file))))
xdsinp = os.path.join(wd, "XDS.INP")
cosets = reindex.reindexing_operators(reference_symm, xac.symm, 0.2, 20)
if len(cosets.combined_cb_ops())==0:
print >>out, "Can't find operator:"
sym.show_summary(out, " ")
reference_symm.show_summary(out, " ")
log_out.write(out.getvalue())
log_out.flush()
return (wdr, None)
newcell = reference_symm.space_group().average_unit_cell(xac.symm.change_basis(cosets.combined_cb_ops()[0]).unit_cell())
newcell = " ".join(map(lambda x: "%.3f"%x, newcell.parameters()))
print >>out, "Scaling with transformed cell:", newcell
#for f in xds_files.generated_by_CORRECT:
# util.rotate_file(os.path.join(wd, f))
bk_prefix = make_backup(xds_files.generated_by_CORRECT, wdir=wd, quiet=True)
modify_xdsinp(xdsinp, inp_params=[("JOB", "CORRECT"),
("SPACE_GROUP_NUMBER", "%d"%sgnum),
("UNIT_CELL_CONSTANTS", newcell),
("INCLUDE_RESOLUTION_RANGE", "50 0"),
("CORRECTIONS", ""),
("NBATCH", "1"),
("MINIMUM_I/SIGMA", None), # use default
("REFINE(CORRECT)", None), # use default
])
run_xds(wd)
for f in ("XDS.INP", "CORRECT.LP", "XDS_ASCII.HKL", "GXPARM.XDS"):
if os.path.exists(os.path.join(wd, f)):
shutil.copyfile(os.path.join(wd, f), os.path.join(wdr, f+"_rescale"))
revert_files(xds_files.generated_by_CORRECT, bk_prefix, wdir=wd, quiet=True)
new_xac = os.path.join(wdr, "XDS_ASCII.HKL_rescale")
if prep_dials_files:
prepare_dials_files(wd, out,
space_group=reference_symm.space_group(),
reindex_op=cosets.combined_cb_ops()[0],
moveto=wdr)
ret = None
if os.path.isfile(new_xac):
ret = (XDS_ASCII(new_xac, read_data=False).symm.unit_cell().parameters(), new_xac)
print >>out, " OK:", ret[0]
else:
print >>out, "Error: rescaling failed (Can't find XDS_ASCII.HKL)"
return (wd, ret)
# rescale_with_specified_symm_worker()
def rescale_with_specified_symm(topdir, dirs, symms, out, sgnum=None, reference_symm=None, nproc=1, prep_dials_files=False, copyto_root=None):
assert (sgnum, reference_symm).count(None) == 1
if sgnum is not None:
sgnum_laue = sgtbx.space_group_info(sgnum).group().build_derived_reflection_intensity_group(False).type().number()
matches = filter(lambda x:x.reflection_intensity_symmetry(False).space_group_info().type().number()==sgnum_laue, symms)
matched_cells = numpy.array(map(lambda x: x.unit_cell().parameters(), matches))
median_cell = map(lambda x: numpy.median(matched_cells[:,x]), xrange(6))
reference_symm = crystal.symmetry(median_cell, sgnum)
else:
sgnum = reference_symm.space_group_info().type().number()
sgnum_laue = reference_symm.space_group().build_derived_reflection_intensity_group(False).type().number()
print >>out
print >>out, "Re-scaling with specified symmetry:", reference_symm.space_group_info().symbol_and_number()
print >>out, " reference cell:", reference_symm.unit_cell()
print >>out
print >>out
out.flush()
st_time = time.time()
wd_ret = []
if copyto_root:
for wd in dirs:
assert wd.startswith(os.path.join(topdir, ""))
tmp = os.path.join(copyto_root, os.path.relpath(wd, topdir))
if not os.path.exists(tmp): os.makedirs(tmp)
wd_ret.append(tmp)
else:
wd_ret = dirs
ret = easy_mp.pool_map(fixed_func=lambda x: rescale_with_specified_symm_worker(x, topdir, out, reference_symm, sgnum, sgnum_laue, prep_dials_files),
args=zip(symms, dirs, wd_ret), processes=nproc)
cells = dict(filter(lambda x: x[1] is not None, ret)) # cell and file
print >>out, "\nTotal wall-clock time for reindexing: %.2f sec (using %d cores)." % (time.time()-st_time, nproc)
return cells, reference_symm
# rescale_with_specified_symm()
def reindex_with_specified_symm_worker(wd, wdr, topdir, log_out, reference_symm, sgnum_laue, prep_dials_files=False):
"""
wd: directory where XDS file exists
    wdr: wd to return; a directory where the transformed file should be saved.
    If wd != wdr, files in wd/ are left unchanged during the procedure. Multiprocessing is unsafe when wd == wdr.
"""
out = StringIO()
print >>out, "%s:" % os.path.relpath(wd, topdir),
# Find appropriate data
xac_file = util.return_first_found_file(("XDS_ASCII.HKL_noscale.org", "XDS_ASCII.HKL_noscale",
"XDS_ASCII_fullres.HKL.org", "XDS_ASCII_fullres.HKL",
"XDS_ASCII.HKL.org", "XDS_ASCII.HKL", "DIALS.HKL.org", "DIALS.HKL"),
wd=wd)
if xac_file is None:
print >>out, "Can't find XDS_ASCII file in %s" % wd
log_out.write(out.getvalue())
log_out.flush()
return (wdr, None)
if xac_file.endswith(".org"): xac_file_out = xac_file[:-4]
else: xac_file_out = xac_file
xac = XDS_ASCII(xac_file, read_data=False)
print >>out, "%s %s (%s)" % (os.path.basename(xac_file), xac.symm.space_group_info(),
",".join(map(lambda x: "%.2f"%x, xac.symm.unit_cell().parameters())))
if xac.symm.reflection_intensity_symmetry(False).space_group_info().type().number() == sgnum_laue:
if xac.symm.unit_cell().is_similar_to(reference_symm.unit_cell(), 0.1, 10): # XXX Check unit cell consistency!!
print >>out, " Already scaled with specified symmetry"
log_out.write(out.getvalue())
log_out.flush()
if wd != wdr: shutil.copy2(xac_file, wdr)
if prep_dials_files and "DIALS.HKL" not in xac_file:
prepare_dials_files(wd, out, moveto=wdr)
return (wdr, (numpy.array(xac.symm.unit_cell().parameters()),
os.path.join(wdr, os.path.basename(xac_file))))
cosets = reindex.reindexing_operators(reference_symm, xac.symm, 0.2, 20) # XXX ISN'T THIS TOO LARGE?
if len(cosets.combined_cb_ops())==0:
print >>out, "Can't find operator:"
xac.symm.show_summary(out, " ")
reference_symm.show_summary(out, " ")
log_out.write(out.getvalue())
log_out.flush()
return (wdr, None)
if wd == wdr:
dest = tempfile.mkdtemp(prefix="multiprep", dir=wd)
else:
dest = wdr
hklout = os.path.join(dest, os.path.basename(xac_file_out))
newcell = xac.write_reindexed(op=cosets.combined_cb_ops()[0],
space_group=reference_symm.space_group(),
hklout=hklout)
if "DIALS.HKL" in os.path.basename(xac_file):
outstr = 'output.experiments="%sreindexed_experiments.json" ' % os.path.join(dest, "")
outstr += 'output.reflections="%sreindexed_reflections.pickle" ' % os.path.join(dest, "")
for f in ("experiments.json", "indexed.pickle"):
if not os.path.isfile(os.path.join(os.path.dirname(xac_file), f)): continue
util.call('dials.reindex %s change_of_basis_op=%s space_group="%s" %s'%(f,
cosets.combined_cb_ops()[0].as_abc(),
reference_symm.space_group_info(),
outstr),
wdir=os.path.dirname(xac_file))
elif prep_dials_files:
prepare_dials_files(wd, out,
space_group=reference_symm.space_group(),
reindex_op=cosets.combined_cb_ops()[0],
moveto=dest)
newcell_str = " ".join(map(lambda x: "%.3f"%x, newcell.parameters()))
print >>out, " Reindexed to transformed cell: %s with %s" % (newcell_str, cosets.combined_cb_ops()[0].as_hkl())
log_out.write(out.getvalue())
log_out.flush()
if wd == wdr:
for f in glob.glob(os.path.join(dest, "*")):
f_in_wd = os.path.join(wd, os.path.basename(f))
if os.path.exists(f_in_wd) and not os.path.exists(f_in_wd+".org"): os.rename(f_in_wd, f_in_wd+".org")
os.rename(f, f_in_wd)
shutil.rmtree(dest)
ret = (numpy.array(newcell.parameters()),
os.path.join(wd, os.path.basename(xac_file_out)))
else:
ret = (numpy.array(newcell.parameters()), hklout)
return (wdr, ret)
# reindex_with_specified_symm_worker()
def reindex_with_specified_symm(topdir, reference_symm, dirs, out, nproc=10, prep_dials_files=False, copyto_root=None):
print >>out
print >>out, "Re-index to specified symmetry:"
reference_symm.show_summary(out, " ")
print >>out
print >>out
out.flush()
st_time = time.time()
wd_ret = []
if copyto_root:
for wd in dirs:
assert wd.startswith(os.path.join(topdir, ""))
tmp = os.path.join(copyto_root, os.path.relpath(wd, topdir))
if not os.path.exists(tmp): os.makedirs(tmp)
wd_ret.append(tmp)
else:
wd_ret = dirs
sgnum_laue = reference_symm.space_group().build_derived_reflection_intensity_group(False).type().number()
ret = easy_mp.pool_map(fixed_func=lambda wd2: reindex_with_specified_symm_worker(wd2[0], wd2[1], topdir, out, reference_symm, sgnum_laue, prep_dials_files),
args=zip(dirs, wd_ret), processes=nproc)
cells = dict(filter(lambda x: x[1] is not None, ret)) # cell and file
print >>out, "\nTotal wall-clock time for reindexing: %.2f sec (using %d cores)." % (time.time()-st_time, nproc)
return cells
# reindex_with_specified_symm()
class PrepMerging:
def __init__(self, cell_graph):
self.cell_graph = cell_graph
self.cell_and_files = {}
self.log_buffer = None
# __init__()
def find_groups(self):
sio = StringIO()
self.cell_graph.group_xds_results(sio)
self.log_buffer = sio.getvalue()
return self.log_buffer
# find_groups()
def prep_merging(self, workdir, group, symmidx=None, reference_symm=None, topdir=None, cell_method="reindex", nproc=1, prep_dials_files=True, into_workdir=True):
assert (symmidx, reference_symm).count(None) == 1
from yamtbx.util.xtal import format_unit_cell
from cctbx.crystal import reindex
cm = self.cell_graph
prep_log_out = multi_out()
prep_log_out.register("log", open(os.path.join(workdir, "prep_merge.log"), "w"), atexit_send_to=None)
prep_log_out.register("stdout", sys.stdout)
prep_log_out.write(self.log_buffer)
prep_log_out.flush()
if reference_symm is None: reference_symm = cm.get_reference_symm(group-1, symmidx)
prep_log_out.write("\n\ngroup_choice= %d, symmetry= %s (%s)\n" % (group, reference_symm.space_group_info(),
format_unit_cell(reference_symm.unit_cell())))
prep_log_out.flush()
# Scale with specified symmetry
symms = map(lambda i: cm.symms[i], cm.groups[group-1])
dirs = map(lambda i: cm.dirs[i], cm.groups[group-1])
copyto_root = os.path.join(workdir, "input_files") if into_workdir else None
if not topdir: topdir = os.path.dirname(os.path.commonprefix(dirs))
if cell_method == "reindex":
self.cell_and_files = reindex_with_specified_symm(topdir, reference_symm, dirs,
out=prep_log_out, nproc=nproc,
prep_dials_files=prep_dials_files, copyto_root=copyto_root)
elif cell_method == "refine":
self.cell_and_files, reference_symm = rescale_with_specified_symm(topdir, dirs, symms,
reference_symm=reference_symm,
out=prep_log_out, nproc=nproc,
prep_dials_files=prep_dials_files,
copyto_root=copyto_root)
else:
raise "Don't know this choice: %s" % cell_method
prep_log_out.flush()
cosets = reindex.reindexing_operators(reference_symm, reference_symm, max_delta=5)
reidx_ops = cosets.combined_cb_ops()
print >>prep_log_out, "\nReference symmetry:", reference_symm.unit_cell(), reference_symm.space_group_info().symbol_and_number()
msg_reindex = "\n"
if len(reidx_ops) > 1:
msg_reindex += "!! ATTENTION !! Reindex operators found. You may need to reindex some files before merging.\n"
for rop in reidx_ops:
msg_reindex += " operator: %-16s Cell: (%s)\n" % (rop.as_hkl(),
format_unit_cell(reference_symm.unit_cell().change_basis(rop)))
msg_reindex += "Try kamo.resolve_indexing_ambiguity command before merging!!"
else:
msg_reindex += "No reindex operators found. No need to run kamo.resolve_indexing_ambiguity."
print >>prep_log_out, "%s\n\n" % msg_reindex
prep_log_out.close()
# Make list for merging
ofs_lst = open(os.path.join(workdir, "formerge.lst"), "w")
ofs_dat = open(os.path.join(workdir, "cells.dat"), "w")
ofs_dat.write("file a b c al be ga\n")
for wd in sorted(self.cell_and_files):
cell, xas = self.cell_and_files[wd]
ofs_lst.write(xas+"\n")
ofs_dat.write(xas+" "+" ".join(map(lambda x:"%7.3f"%x, cell))+"\n")
ofs_dat.close()
ofs_lst.close()
return msg_reindex, reidx_ops
# prep_merging()
def write_merging_scripts(self, workdir, sge_pe_name="par", prep_dials_files=True):
open(os.path.join(workdir, "merge_blend.sh"), "w").write("""\
#!/bin/sh
# settings
dmin=2.8 # resolution
anomalous=false # true or false
lstin=formerge.lst # list of XDS_ASCII.HKL files
use_ramdisk=true # set false if there is little memory or little space in /tmp
# _______/setting
kamo.multi_merge \\
workdir=blend_${dmin}A_framecc_b+B \\
lstin=${lstin} d_min=${dmin} anomalous=${anomalous} \\
space_group=None reference.data=None \\
program=xscale xscale.reference=bmin xscale.degrees_per_batch=None \\
reject_method=framecc+lpstats rejection.lpstats.stats=em.b+bfactor \\
clustering=blend blend.min_cmpl=90 blend.min_redun=2 blend.max_LCV=None blend.max_aLCV=None \\
max_clusters=None xscale.use_tmpdir_if_available=${use_ramdisk} \\
# batch.engine=sge batch.par_run=merging batch.nproc_each=8 nproc=8 batch.sge_pe_name=%s
""" % sge_pe_name)
os.chmod(os.path.join(workdir, "merge_blend.sh"), 0755)
open(os.path.join(workdir, "merge_ccc.sh"), "w").write("""\
#!/bin/sh
# settings
dmin=2.8 # resolution
clustering_dmin=3.5 # resolution for CC calculation
anomalous=false # true or false
lstin=formerge.lst # list of XDS_ASCII.HKL files
use_ramdisk=true # set false if there is little memory or little space in /tmp
# _______/setting
kamo.multi_merge \\
workdir=ccc_${dmin}A_framecc_b+B \\
lstin=${lstin} d_min=${dmin} anomalous=${anomalous} \\
space_group=None reference.data=None \\
program=xscale xscale.reference=bmin xscale.degrees_per_batch=None \\
reject_method=framecc+lpstats rejection.lpstats.stats=em.b+bfactor \\
clustering=cc cc_clustering.d_min=${clustering_dmin} cc_clustering.b_scale=false cc_clustering.use_normalized=false \\
cc_clustering.min_cmpl=90 cc_clustering.min_redun=2 \\
max_clusters=None xscale.use_tmpdir_if_available=${use_ramdisk} \\
# batch.engine=sge batch.par_run=merging batch.nproc_each=8 nproc=8 batch.sge_pe_name=%s
""" % sge_pe_name)
os.chmod(os.path.join(workdir, "merge_ccc.sh"), 0755)
open(os.path.join(workdir, "filter_cell.R"), "w").write(r"""iqrf <- 2.5
outliers <- function(x) {
q1 <- quantile(x, 0.25)
q3 <- quantile(x, 0.75)
iqr <- q3 - q1
return(x<q1-iqr*iqrf | x>q3+iqr*iqrf)
}
myhist <- function(v, title) {
if(sd(v)==0) {
plot.new()
return()
}
hist(v, main=paste("Histogram of", title), xlab=title)
q1 <- quantile(v, 0.25)
q3 <- quantile(v, 0.75)
iqr <- q3 - q1
abline(v=c(q1-iqr*iqrf, q3+iqr*iqrf), col="blue")
}
cells <- read.table("cells.dat", h=T)
good <- subset(cells, ! (outliers(a) | outliers(b) | outliers(c) | outliers(al) | outliers(be) | outliers(ga)))
write.table(good$file, "formerge_goodcell.lst", quote=F, row.names=F, col.names=F)
pdf("hist_cells.pdf", width=14, height=7)
par(mfrow=c(2,3))
myhist(cells$a, "a")
myhist(cells$b, "b")
myhist(cells$c, "c")
myhist(cells$al,"alpha")
myhist(cells$be,"beta")
myhist(cells$ga,"gamma")
dev.off()
cat("See hist_cells.pdf\n\n")
cat(sprintf("%4d files given.\n", nrow(cells)))
cat(sprintf("mean: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f\n", mean(cells$a), mean(cells$b), mean(cells$c), mean(cells$al), mean(cells$be), mean(cells$ga)))
cat(sprintf(" std: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f\n", sd(cells$a), sd(cells$b), sd(cells$c), sd(cells$al), sd(cells$be), sd(cells$ga)))
cat(sprintf(" iqr: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f\n", IQR(cells$a), IQR(cells$b), IQR(cells$c), IQR(cells$al), IQR(cells$be), IQR(cells$ga)))
cat(sprintf("\n%4d files removed.\n", nrow(cells)-nrow(good)))
cat(sprintf("mean: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f\n", mean(good$a), mean(good$b), mean(good$c), mean(good$al), mean(good$be), mean(good$ga)))
cat(sprintf(" std: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f\n", sd(good$a), sd(good$b), sd(good$c), sd(good$al), sd(good$be), sd(good$ga)))
cat(sprintf(" iqr: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f\n", IQR(good$a), IQR(good$b), IQR(good$c), IQR(good$al), IQR(good$be), IQR(good$ga)))
cat("\nUse formerge_goodcell.lst instead!\n")
""")
if prep_dials_files:
wd_jref = os.path.join(workdir, "dials_joint_refine")
os.mkdir(wd_jref)
ofs_phil = open(os.path.join(wd_jref, "experiments_and_reflections.phil"), "w")
ofs_phil.write("input {\n")
for wd in sorted(self.cell_and_files):
fe = os.path.join(wd, "experiments.json")
fp = os.path.join(wd, "integrate_hkl.pickle")
if os.path.isfile(fe) and os.path.isfile(fp):
ofs_phil.write(' experiments = "%s"\n' % fe)
ofs_phil.write(' reflections = "%s"\n' % fp)
ofs_phil.write("}\n")
ofs_phil.close()
open(os.path.join(wd_jref, "joint_refine.sh"), "w").write("""\
#!/bin/sh
dials.combine_experiments experiments_and_reflections.phil reference_from_experiment.beam=0 reference_from_experiment.goniometer=0 average_detector=true compare_models=false
dials.refine combined_experiments.json combined_reflections.pickle auto_reduction.action=remove verbosity=9
""")
# write_merging_scripts()
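    # Illustrative note (not in the original source) on the IQR rule in
    # filter_cell.R above: with iqrf = 2.5, a cell parameter is flagged as an
    # outlier when x < Q1 - 2.5*IQR or x > Q3 + 2.5*IQR. E.g. for Q1 = 78.0
    # and Q3 = 80.0 (IQR = 2.0), values outside [73.0, 85.0] are dropped from
    # formerge_goodcell.lst.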
# class PrepMerging
def run(params):
if not params.workdir:
print "Give workdir="
return
if os.path.exists(params.workdir):
print "workdir already exists:", params.workdir
return
params.workdir = os.path.abspath(params.workdir)
if None not in (params.unit_cell, params.space_group):
user_xs = crystal.symmetry(params.unit_cell, params.space_group)
else:
user_xs = None
from yamtbx.dataproc.auto.command_line.multi_check_cell_consistency import CellGraph
cm = CellGraph(tol_length=params.cell_grouping.tol_length,
tol_angle=params.cell_grouping.tol_angle)
if len(params.xdsdir) == 1 and os.path.isfile(params.xdsdir[0]):
params.xdsdir = util.read_path_list(params.xdsdir[0])
xds_dirs = []
for xd0 in params.xdsdir:
for xd in glob.glob(xd0):
xds_dirs.extend(map(lambda x: x[0], filter(lambda x: any(map(lambda y: y.startswith("XDS_ASCII.HKL"), x[2])) or "DIALS.HKL" in x[2],
os.walk(os.path.abspath(xd)))))
for i, xd in enumerate(xds_dirs):
cm.add_proc_result(i, xd)
pm = PrepMerging(cm)
print pm.find_groups()
if len(cm.groups) == 0:
print "Oh, no. No data."
return
if params.group_choice is None:
while True:
try:
val = int(raw_input("Input group number [%d..%d]: " % (1, len(cm.groups))))
if not 0 < val <= len(cm.groups): raise ValueError
params.group_choice = val
break
except ValueError:
continue
symms = cm.get_selectable_symms(params.group_choice-1)
symmidx = -1
if user_xs:
#for xs in cm.get_selectable_symms(params.group_choice):
raise "Not supported now."
while True:
try:
val = int(raw_input("Input symmetry number [%d..%d]: " % (0, len(symms)-1)))
if not 0 <= val < len(symms): raise ValueError
symmidx = val
break
except ValueError:
continue
os.mkdir(params.workdir)
topdir = os.path.dirname(os.path.commonprefix(xds_dirs))
pm.prep_merging(group=params.group_choice, symmidx=symmidx, workdir=params.workdir, topdir=topdir,
cell_method=params.cell_method, nproc=params.nproc, prep_dials_files=params.prep_dials_files, into_workdir=params.copy_into_workdir)
pm.write_merging_scripts(params.workdir, "par", params.prep_dials_files)
# run()
def run_from_args(argv):
if "-h" in argv or "--help" in argv:
print """
kamo.multi_prep_merging is a helper program to prepare for merging multiple (small wedge) datasets.
Typical usage:
kamo.multi_prep_merging workdir=merge_reidx xdsdir=../\*/
All parameters:
"""
iotbx.phil.parse(master_params_str).show(prefix=" ", attributes_level=1)
return
cmdline = iotbx.phil.process_command_line(args=argv,
master_string=master_params_str)
params = cmdline.work.extract()
args = cmdline.remaining_args
run(params)
# run_from_args()
if __name__ == "__main__":
import sys
run_from_args(sys.argv[1:])
|
keitaroyam/yamtbx
|
yamtbx/dataproc/auto/command_line/multi_prep_merging.py
|
Python
|
bsd-3-clause
| 27,305
|
[
"CRYSTAL"
] |
bb7ba4a40f82ad78b69931f354428fc34607e28ae22f7926067c1f72944e109a
|
# Code from Chapter 18 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2014
import pylab as pl
import numpy as np
# Weibull and Gaussian fits
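# Densities used below (for reference; not in the original source):
#   Weibull:  f(x) = (k/l) * (x/l)**(k-1) * exp(-(x/l)**k),  x >= 0
#   Gaussian: f(x) = 1/(sqrt(2*pi)*s) * exp(-0.5*((x-mu)/s)**2)
# The samples x are drawn uniformly; xx is the Weibull density with l = 1,
# k = 2 plus uniform noise in [-0.1, 0.1).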
pl.ion()
pl.figure()
l = 1.
k = 2.
x = np.random.random(27)*2.
x = np.concatenate((x,np.random.rand(9)+2.))
xx = k/l * (x/l)**(k-1) * np.exp(-(x/l)**k) + np.random.random(36)*0.2-0.1
pl.plot(x,xx,'o')
pl.figure()
pl.plot(x,xx,'o')
x = np.arange(0,3,0.01)
s = 0.5
mu = 0.7
y = 1/(np.sqrt(2*np.pi)*s) * np.exp(-0.5*(x-mu)**2/s**2)
pl.plot(x,y,'k')
z = k/l * (x/l)**(k-1) * np.exp(-(x/l)**k)
pl.plot(x,z,'r--')
|
Anderson-Lab/anderson-lab.github.io
|
csc_466_2021_spring/MLCode/Ch18/plotdist.py
|
Python
|
mit
| 850
|
[
"Gaussian"
] |
44fd5644677e0aa365e648ba610da5761e7c8e8f12a93408c2adb02b90d29a5e
|
import unittest
import cryptosite
import saliweb.test
import saliweb.backend
import os
import subprocess
class MockCheckCall(object):
def __init__(self):
self.calls = []
def __call__(self, cmd, **keys):
self.calls.append(cmd)
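# Illustrative note (not from the original source): MockCheckCall stands in
# for subprocess.check_call and records each command instead of running it:
#     subprocess.check_call = MockCheckCall()
#     subprocess.check_call(["zip", "chimera.zip", "cryptosite.chimerax"])
#     # -> the command is appended to .calls; nothing is executed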
class Tests(saliweb.test.TestCase):
"""Test postprocessing methods"""
def test_postprocess_first_ok(self):
"""Test postprocess_first(), OK completion"""
j = self.make_test_job(cryptosite.Job, 'POSTPROCESSING')
with open(os.path.join(j.directory, 'setup.log'), 'w') as fh:
fh.write('test')
j._run_in_job_directory(j.postprocess_first)
def test_postprocess_first_error(self):
"""Test postprocess_first(), with errors"""
j = self.make_test_job(cryptosite.Job, 'RUNNING')
with open(os.path.join(j.directory, 'setup.log'), 'w') as fh:
fh.write('The following chains were not found in the input '
'PDB file: A, B')
j._run_in_job_directory(j.postprocess_first)
def test_postprocess_final(self):
"""Test postprocess_final()"""
j = self.make_test_job(cryptosite.Job, 'RUNNING')
for fname in ('XXX.pol.pred', 'XXX.pol.pred.pdb'):
with open(os.path.join(j.directory, fname), 'w') as fh:
fh.write('test')
old_check_call = subprocess.check_call
mock_cc = MockCheckCall()
try:
subprocess.check_call = mock_cc
j._run_in_job_directory(j.postprocess_final)
finally:
subprocess.check_call = old_check_call
self.assertEqual(len(mock_cc.calls), 2)
top = 'http://server/test/path/testjob'
self.assertTrue(mock_cc.calls[0].endswith(
'cryptosite chimera %s/cryptosite.pol.pred.pdb?passwd=abc '
'%s/cryptosite.pol.pred?passwd=abc cryptosite.chimerax'
% (top, top)))
self.assertEqual(mock_cc.calls[1],
["zip", "chimera.zip", "cryptosite.chimerax"])
for fname in ('cryptosite.pol.pred', 'cryptosite.pol.pred.pdb'):
os.unlink(os.path.join(j.directory, fname))
if __name__ == '__main__':
unittest.main()
|
salilab/cryptosite-web
|
test/backend/test_post.py
|
Python
|
lgpl-2.1
| 2,221
|
[
"ChimeraX"
] |
8f844deef1a25b70ff6b947c7c39a81751610377bcb089ca8a0bc06ac2c4d674
|
# Copyright 2017 Norman Heckscher. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the MNIST network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = tf.app.flags.FLAGS
# tf.app.flags.DEFINE_string('data_dir', '/home/norman/MNIST_data',
# """Path to the MNIST data directory.""")
# Global constants describing the MNIST data set.
IMAGE_SIZE = 28
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
# Constants describing the training process.
INITIAL_LEARNING_RATE = 0.0001 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
mnist = input_data.read_data_sets('/home/norman/MNIST_data', one_hot=False)
def inputs(batch_size=50):
"""Construct input for MNIST training using the TensorFlow framework.
Returns:
images: mnist images
labels: mnist labels
"""
images, labels = mnist.train.next_batch(batch_size)
return images, labels
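# Minimal usage sketch (illustrative only, not part of the original module):
# fetch one batch and check its shape. With one_hot=False the labels come back
# as a 1-D array of integer class indices.
def _example_inputs():
  images, labels = inputs(batch_size=50)
  assert images.shape == (50, 784)  # flattened 28x28 grayscale images
  assert labels.shape == (50,)      # integer class labels in [0, 9]
  return images, labels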
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
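# Illustrative sketch (an assumption, not in the original file): create a
# weight matrix whose L2 penalty lands in the 'losses' collection; loss()
# below sums that collection into the total loss. One-shot only: calling it
# twice would fail on variable reuse within the same graph.
def _example_weight_decay():
  with tf.variable_scope('example'):
    weights = _variable_with_weight_decay('weights', shape=[784, 1024],
                                          stddev=0.04, wd=0.004)
  return weights, tf.get_collection('losses')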
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from MNIST or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int32)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
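# Hedged sketch (illustrative, not part of the original file): wire inference()
# and loss() together. The labels are integer class indices, which is exactly
# what sparse_softmax_cross_entropy_with_logits expects. Assumes a fresh
# default graph, since inference() creates named variable scopes.
def _example_loss():
  image_batch = tf.placeholder(tf.float32, [None, 784])
  label_batch = tf.placeholder(tf.int32, [None])
  logits = inference(image_batch, keep_prob=0.5)
  return loss(logits, label_batch)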
def inference(images, keep_prob=1.0):
"""Build the MNIST model.
  Args:
    images: Images returned from MNIST or inputs().
    keep_prob: dropout keep probability for the local4 layer
      (1.0 disables dropout).
  Returns:
    Logits.
  """
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training
# runs. If we only ran this model on a single GPU, we could simplify this
# function by replacing all instances of tf.get_variable()
# with tf.Variable().
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images
# are grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
x_image = tf.reshape(images, [-1, 28, 28, 1])
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 1, 32],
stddev=5e-2,
wd=0.0)
biases = _variable_on_cpu('biases', [32], tf.constant_initializer(0.0))
conv = tf.nn.conv2d(x_image, kernel, strides=[1, 1, 1, 1],
padding='SAME')
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 32, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, strides=[1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [-1, 7 * 7 * 64])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 1024],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [1024],
tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases,
name=scope.name)
_activation_summary(local3)
# local4 with dropout
with tf.variable_scope('local4') as scope:
# keep_prob = tf.placeholder(tf.float32, name="keep_prob")
local4 = tf.nn.dropout(local3, keep_prob, name=scope.name)
weights = _variable_with_weight_decay('weights', shape=[1024, 10],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [10], tf.constant_initializer(0.1))
softmax_linear = tf.add(tf.matmul(local4, weights), biases,
name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
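# End-to-end sketch (an assumption: the train() mentioned in the module
# docstring lives elsewhere in the repository, so a plain
# GradientDescentOptimizer stands in for it here). Illustrative only; assumes
# a fresh default graph.
def _example_training_step():
  image_batch = tf.placeholder(tf.float32, [None, 784], name='images')
  label_batch = tf.placeholder(tf.int32, [None], name='labels')
  logits = inference(image_batch, keep_prob=0.5)
  total_loss = loss(logits, label_batch)
  optimizer = tf.train.GradientDescentOptimizer(INITIAL_LEARNING_RATE)
  train_op = optimizer.minimize(total_loss)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    images, labels = inputs(batch_size=50)
    _, loss_value = sess.run([train_op, total_loss],
                             feed_dict={image_batch: images,
                                        label_batch: labels})
  return loss_value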
|
normanheckscher/mnist-multi-gpu
|
older/model.py
|
Python
|
apache-2.0
| 8,993
|
[
"Gaussian"
] |
504ecc8c69cc234f86839b025596b6cf04b179671a1aec40208e473d5a9bb008
|