text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filter rules for Gramps.
"""
from ._searchfathername import SearchFatherName
from ._searchmothername import SearchMotherName
from ._searchchildname import SearchChildName
from ._regexpfathername import RegExpFatherName
from ._regexpmothername import RegExpMotherName
from ._regexpchildname import RegExpChildName
from ._hasreltype import HasRelType
from ._allfamilies import AllFamilies
from ._hasgallery import HasGallery
from ._hasidof import HasIdOf
from ._haslds import HasLDS
from ._regexpidof import RegExpIdOf
from ._hasnote import HasNote
from ._hasnoteregexp import HasNoteRegexp
from ._hasnotematchingsubstringof import HasNoteMatchingSubstringOf
from ._hassourcecount import HasSourceCount
from ._hassourceof import HasSourceOf
from ._hasreferencecountof import HasReferenceCountOf
from ._hascitation import HasCitation
from ._familyprivate import FamilyPrivate
from ._hasattribute import HasAttribute
from ._hasevent import HasEvent
from ._isbookmarked import IsBookmarked
from ._matchesfilter import MatchesFilter
from ._matchessourceconfidence import MatchesSourceConfidence
from ._fatherhasnameof import FatherHasNameOf
from ._fatherhasidof import FatherHasIdOf
from ._motherhasnameof import MotherHasNameOf
from ._motherhasidof import MotherHasIdOf
from ._childhasnameof import ChildHasNameOf
from ._childhasidof import ChildHasIdOf
from ._changedsince import ChangedSince
from ._hastag import HasTag
from ._hastwins import HasTwins
from ._isancestorof import IsAncestorOf
from ._isdescendantof import IsDescendantOf
# Rules made available to the filter editor GUI.  The list order determines
# the order in which the rules are presented to the user, so keep related
# rules grouped rather than alphabetizing.
editor_rule_list = [
    AllFamilies,
    HasRelType,
    HasGallery,
    HasIdOf,
    HasLDS,
    HasNote,
    RegExpIdOf,
    HasNoteRegexp,
    HasReferenceCountOf,
    HasSourceCount,
    HasSourceOf,
    HasCitation,
    FamilyPrivate,
    HasEvent,
    HasAttribute,
    IsBookmarked,
    MatchesFilter,
    MatchesSourceConfidence,
    FatherHasNameOf,
    FatherHasIdOf,
    MotherHasNameOf,
    MotherHasIdOf,
    ChildHasNameOf,
    ChildHasIdOf,
    ChangedSince,
    HasTag,
    HasTwins,
    IsAncestorOf,
    IsDescendantOf,
]
| SNoiraud/gramps | gramps/gen/filters/rules/family/__init__.py | Python | gpl-2.0 | 2,993 | [
"Brian"
] | 1fc119a81144048a8ae1a40e11c3e88d6a7f27c6cf7e2f2a7cc6e5c0ad58dbdc |
# encoding: utf-8
"""
Paging capabilities for IPython.core
Authors:
* Brian Granger
* Fernando Perez
Notes
-----
For now this uses ipapi, so it can't be in IPython.utils.  If we can get
rid of that dependency, we could move it there.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import os
import re
import sys
import tempfile
from io import UnsupportedOperation
from IPython import get_ipython
from IPython.core.error import TryNext
from IPython.utils.data import chop
from IPython.utils import io
from IPython.utils.process import system
from IPython.utils.terminal import get_terminal_size
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
esc_re = re.compile(r"(\x1b[^m]+m)")
def page_dumb(strng, start=0, screen_lines=25):
    """Minimal forward-only pager written in pure Python.

    Used as a last resort when no system pager works.  Same interface as
    page(), except for the missing pager_cmd and mode arguments.

    Parameters
    ----------
    strng : str
        Text to display.
    start : int
        Line offset at which to start displaying.
    screen_lines : int
        Number of lines per screenful.
    """
    visible = strng.splitlines()[start:]
    screens = chop(visible, screen_lines - 1)
    if len(screens) == 1:
        # Everything fits on one screen: just print it.
        print(os.linesep.join(screens[0]), file=io.stdout)
        return
    # Carry the most recent ANSI escape sequence over to the next screen so
    # that color/style state survives the pause between pages.
    carry_escape = ""
    for screen in screens[:-1]:
        hunk = os.linesep.join(screen)
        print(carry_escape + hunk, file=io.stdout)
        if not page_more():
            return
        escapes = esc_re.findall(hunk)
        if escapes:
            carry_escape = escapes[-1]
    print(carry_escape + os.linesep.join(screens[-1]), file=io.stdout)
def _detect_screen_size(screen_lines_def):
    """Attempt to work out the number of lines on the screen.

    This is called by page().  It can raise an error (e.g. when run in the
    test suite), so it's separated out so it can easily be called in a try
    block.

    Parameters
    ----------
    screen_lines_def : int
        Fallback line count returned whenever detection is not possible.

    Returns
    -------
    int
        The detected number of screen lines, or ``screen_lines_def``.
    """
    TERM = os.environ.get('TERM',None)
    if not((TERM=='xterm' or TERM=='xterm-color') and sys.platform != 'sunos5'):
        # curses causes problems on many terminals other than xterm, and
        # some termios calls lock up on Sun OS5.
        return screen_lines_def
    try:
        import termios
        import curses
    except ImportError:
        return screen_lines_def
    # There is a bug in curses, where *sometimes* it fails to properly
    # initialize, and then after the endwin() call is made, the
    # terminal is left in an unusable state. Rather than trying to
    # check everytime for this (by requesting and comparing termios
    # flags each time), we just save the initial terminal state and
    # unconditionally reset it every time. It's cheaper than making
    # the checks.
    term_flags = termios.tcgetattr(sys.stdout)
    # Curses modifies the stdout buffer size by default, which messes
    # up Python's normal stdout buffering. This would manifest itself
    # to IPython users as delayed printing on stdout after having used
    # the pager.
    #
    # We can prevent this by manually setting the NCURSES_NO_SETBUF
    # environment variable. For more details, see:
    # http://bugs.python.org/issue10144
    NCURSES_NO_SETBUF = os.environ.get('NCURSES_NO_SETBUF', None)
    os.environ['NCURSES_NO_SETBUF'] = ''
    # Proceed with curses initialization
    try:
        scr = curses.initscr()
    except AttributeError:
        # Curses on Solaris may not be complete, so we can't use it there
        return screen_lines_def
    screen_lines_real,screen_cols = scr.getmaxyx()
    curses.endwin()
    # Restore environment (the saved value may legitimately have been unset).
    if NCURSES_NO_SETBUF is None:
        del os.environ['NCURSES_NO_SETBUF']
    else:
        os.environ['NCURSES_NO_SETBUF'] = NCURSES_NO_SETBUF
    # Restore terminal state in case endwin() didn't.
    termios.tcsetattr(sys.stdout,termios.TCSANOW,term_flags)
    # Now we have what we needed: the screen size in rows/columns
    return screen_lines_real
    #print '***Screen size:',screen_lines_real,'lines x',\
    #screen_cols,'columns.' # dbg
def page(strng, start=0, screen_lines=0, pager_cmd=None):
    """Print a string, piping through a pager after a certain length.

    The screen_lines parameter specifies the number of *usable* lines of your
    terminal screen (total lines minus lines you need to reserve to show other
    information).

    If you set screen_lines to a number <=0, page() will try to auto-determine
    your screen size and will only use up to (screen_size+screen_lines) for
    printing, paging after that.  That is, if you want auto-detection but need
    to reserve the bottom 3 lines of the screen, use screen_lines = -3, and
    for auto-detection without any lines reserved simply use screen_lines = 0.

    If a string won't fit in the allowed lines, it is sent through the
    specified pager command.  If none given, look for PAGER in the
    environment, and ultimately default to less.

    If no system pager works, the string is sent through a 'dumb pager'
    written in python, very simplistic.
    """
    # Some routines may auto-compute start offsets incorrectly and pass a
    # negative value. Offset to 0 for robustness.
    start = max(0, start)
    # first, try the hook (a frontend may provide its own pager display)
    ip = get_ipython()
    if ip:
        try:
            ip.hooks.show_in_pager(strng)
            return
        except TryNext:
            pass
    # Ugly kludge, but calling curses.initscr() flat out crashes in emacs
    TERM = os.environ.get('TERM','dumb')
    if TERM in ['dumb','emacs'] and os.name != 'nt':
        print(strng)
        return
    # chop off the topmost part of the string we don't want to see
    str_lines = strng.splitlines()[start:]
    str_toprint = os.linesep.join(str_lines)
    num_newlines = len(str_lines)
    len_str = len(str_toprint)
    # Dumb heuristics to guesstimate number of on-screen lines the string
    # takes. Very basic, but good enough for docstrings in reasonable
    # terminals. If someone later feels like refining it, it's not hard.
    numlines = max(num_newlines,int(len_str/80)+1)
    screen_lines_def = get_terminal_size()[1]
    # auto-determine screen size; a negative screen_lines reserves that many
    # lines at the bottom of the detected screen.
    if screen_lines <= 0:
        try:
            screen_lines += _detect_screen_size(screen_lines_def)
        except (TypeError, UnsupportedOperation):
            # Detection can fail when stdout is not a real terminal (e.g. in
            # the test suite); just print everything in that case.
            print(str_toprint, file=io.stdout)
            return
    #print 'numlines',numlines,'screenlines',screen_lines  # dbg
    if numlines <= screen_lines :
        #print '*** normal print'  # dbg
        print(str_toprint, file=io.stdout)
    else:
        # Try to open pager and default to internal one if that fails.
        # All failure modes are tagged as 'retval=1', to match the return
        # value of a failed system command.  If any intermediate attempt
        # sets retval to 1, at the end we resort to our own page_dumb() pager.
        pager_cmd = get_pager_cmd(pager_cmd)
        pager_cmd += ' ' + get_pager_start(pager_cmd,start)
        if os.name == 'nt':
            if pager_cmd.startswith('type'):
                # The default WinXP 'type' command is failing on complex strings.
                retval = 1
            else:
                # Feed the text through a temp file so the pager reads from
                # a real file handle on Windows.
                fd, tmpname = tempfile.mkstemp('.txt')
                try:
                    os.close(fd)
                    with open(tmpname, 'wt') as tmpfile:
                        tmpfile.write(strng)
                    cmd = "%s < %s" % (pager_cmd, tmpname)
                    # tmpfile needs to be closed for windows
                    if os.system(cmd):
                        retval = 1
                    else:
                        retval = None
                finally:
                    os.remove(tmpname)
        else:
            try:
                retval = None
                # if I use popen4, things hang. No idea why.
                #pager,shell_out = os.popen4(pager_cmd)
                pager = os.popen(pager_cmd, 'w')
                try:
                    pager_encoding = pager.encoding or sys.stdout.encoding
                    pager.write(py3compat.cast_bytes_py2(
                        strng, encoding=pager_encoding))
                finally:
                    retval = pager.close()
            except IOError as msg:  # broken pipe when user quits
                if msg.args == (32, 'Broken pipe'):
                    retval = None
                else:
                    retval = 1
            except OSError:
                # Other strange problems, sometimes seen in Win2k/cygwin
                retval = 1
        if retval is not None:
            # The system pager failed in some way: fall back on ours.
            page_dumb(strng,screen_lines=screen_lines)
def page_file(fname, start=0, pager_cmd=None):
    """Page a file, using an optional pager command and starting line.

    Parameters
    ----------
    fname : str
        Path of the file to display.
    start : int
        Line offset at which to start displaying the file.
    pager_cmd : str or None
        Pager command to use; when None, one is chosen via get_pager_cmd().
    """
    pager_cmd = get_pager_cmd(pager_cmd)
    pager_cmd += ' ' + get_pager_start(pager_cmd, start)
    try:
        if os.environ['TERM'] in ['emacs', 'dumb']:
            # These terminals can't run a real pager; jump straight to the
            # internal fallback below.
            raise EnvironmentError
        system(pager_cmd + ' ' + fname)
    except Exception:
        # System pager unavailable or failed: page the contents ourselves.
        # Note: was a bare `except:`, which also swallowed KeyboardInterrupt.
        try:
            if start > 0:
                start -= 1
            # Use a context manager so the file handle is closed promptly
            # (previously the handle was leaked until garbage collection).
            with open(fname) as fobj:
                page(fobj.read(), start)
        except Exception:
            print('Unable to show file', repr(fname))
def get_pager_cmd(pager_cmd=None):
    """Return a pager command.

    Makes some attempts at finding an OS-correct one.  The PAGER environment
    variable, when set, overrides the platform default.

    Parameters
    ----------
    pager_cmd : str or None
        Explicit pager command; returned unchanged when given.

    Returns
    -------
    str
        The pager command to use.
    """
    if pager_cmd is not None:
        return pager_cmd
    if os.name == 'posix':
        default_pager_cmd = 'less -r'  # -r for color control sequences
    elif os.name in ['nt', 'dos']:
        default_pager_cmd = 'type'
    else:
        # Previously this branch left default_pager_cmd unbound, raising
        # NameError on any other platform.  'more' is a safe generic choice.
        default_pager_cmd = 'more'
    # Replaces a bare try/except around os.environ['PAGER'], which could
    # mask unrelated errors.
    return os.environ.get('PAGER', default_pager_cmd)
def get_pager_start(pager, start):
    """Return the string for paging files with an offset.

    This is the '+N' argument which less and more (under Unix) accept.
    Pagers other than 'less'/'more', or a zero/false offset, yield an
    empty string.
    """
    offset_arg = ''
    if pager in ('less', 'more') and start:
        offset_arg = '+%s' % start
    return offset_arg
# (X)emacs on win32 doesn't like to be bypassed with msvcrt.getch()
# Two implementations of page_more() are defined at import time: a
# single-keypress version on native Windows consoles, and a line-input
# version everywhere else.
if os.name == 'nt' and os.environ.get('TERM','dumb') != 'emacs':
    import msvcrt
    def page_more():
        """ Smart pausing between pages
        @return: True if need print more lines, False if quit
        """
        io.stdout.write('---Return to continue, q to quit--- ')
        ans = msvcrt.getwch()
        if ans in ("q", "Q"):
            result = False
        else:
            result = True
        # Erase the 37-character prompt: backspace over it, overwrite with
        # spaces, then backspace again to leave the cursor at the start.
        io.stdout.write("\b"*37 + " "*37 + "\b"*37)
        return result
else:
    def page_more():
        """Prompt between pages; return False if the user typed q/Q."""
        ans = py3compat.input('---Return to continue, q to quit--- ')
        if ans.lower().startswith('q'):
            return False
        else:
            return True
def snip_print(str,width = 75,print_full = 0,header = ''):
    """Print a string snipping the midsection to fit in width.

    print_full: mode control:

      - 0: only snip long strings
      - 1: send to page() directly.
      - 2: snip long strings and ask for full length viewing with page()

    Return 1 if snipping was necessary, 0 otherwise.
    """
    # NOTE: the parameter is named `str` (shadowing the builtin) for
    # backward compatibility with existing keyword callers.
    if print_full == 1:
        page(header + str)
        return 0

    print(header, end=' ')
    snipped = 0 if len(str) < width else 1
    if snipped:
        half = int((width - 5) / 2)
        print(str[:half] + ' <...> ' + str[-half:])
    else:
        print(str)
    if snipped and print_full == 2:
        if py3compat.input(header + ' Snipped. View (y/n)? [N]').lower() == 'y':
            page(str)
    return snipped
| WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/IPython/core/page.py | Python | bsd-3-clause | 12,093 | [
"Brian"
] | b3c089c01d9e6fec31a93257aa6f1ca075f52de6112b4840ee0787f4296aaa24 |
from Firefly import logging
from Firefly.components.zwave.device_types.contact_sensor import ZwaveContactSensor
from Firefly.const import CONTACT, CONTACT_CLOSED
ALARM = 'alarm'
BATTERY = 'battery'
TITLE = 'ZW120 Aeotec Door/Window Sensor'
COMMANDS = []
REQUESTS = [ALARM, BATTERY, CONTACT]
INITIAL_VALUES = {
'_alarm': False,
'_battery': -1,
'_contact': CONTACT_CLOSED
}
def Setup(firefly, package, **kwargs):
    """Instantiate a ZW120 sensor, register it with firefly, and return its id."""
    logging.message('Entering %s setup' % TITLE)
    device = ZW120(firefly, package, **kwargs)
    firefly.install_component(device)
    return device.id
class ZW120(ZwaveContactSensor):
  """Aeotec ZW120 door/window sensor, built on the generic z-wave contact sensor."""

  def __init__(self, firefly, package, **kwargs):
    # Merge caller-supplied initial values on top of the module defaults;
    # when none are supplied, pass the defaults through untouched.
    provided = kwargs.get('initial_values')
    if provided is None:
      merged_values = INITIAL_VALUES
    else:
      merged_values = INITIAL_VALUES.copy()
      merged_values.update(provided)
    kwargs.update({
      'initial_values': merged_values,
      'commands':       COMMANDS,
      'requests':       REQUESTS
    })
    super().__init__(firefly, package, TITLE, **kwargs)

  def update_device_config(self, **kwargs):
    """Push the desired configuration parameters to the device.

    Useful for establishing new default device configs (e.g. report
    intervals/timeouts for future hardware revisions).

    Args:
      **kwargs ():
    """
    # TODO: Pull these out into config values
    # Parameter reference:
    # https://github.com/OpenZWave/open-zwave/blob/master/config/aeotec/zw120.xml
    # (2, 0) would disable the 10 min wake up time.
    config_ok = self.verify_set_zwave_params([
      (121, 17)  # Sensor Binary and Battery Report
    ])
    self._update_try_count += 1
    self._config_updated = config_ok
| Firefly-Automation/Firefly | Firefly/components/zwave/aeotec/zw120_door_window_sensor_gen5.py | Python | apache-2.0 | 1,780 | [
"Firefly"
] | 439880a945abe9d0f5e4533359ce57aebee8c99c6ef04e73e4b6fb2ad3659500 |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import os,unittest,numpy as np
from pyscf.nao.m_rsphar_libnao import rsphar_vec as rsphar_vec_libnao
class KnowValues(unittest.TestCase):
  """Exercises the vectorized real-spherical-harmonics implementations."""

  def test_rsh_vec(self):
    """ Compute real spherical harmonics via a vectorized algorithm """
    # NOTE(review): the accuracy comparison below is commented out, so this
    # test only checks that both implementations run without crashing; it
    # asserts nothing about their agreement -- confirm this is intentional.
    from pyscf.nao.m_rsphar_libnao import rsphar_exp_vec as rsphar_exp_vec_libnao
    # Imported but unused below -- presumably kept for manual comparisons.
    from pyscf.nao.m_rsphar_vec import rsphar_vec as rsphar_vec_python
    from timeit import default_timer as timer
    ll = [0,1,2,3,4]  # maximum angular momenta to exercise
    crds = np.random.rand(20000, 3)  # random Cartesian coordinates
    for lmax in ll:
      t1 = timer()
      rsh1 = rsphar_exp_vec_libnao(crds.T, lmax)
      t2 = timer(); tpython = (t2-t1); t1 = timer()
      rsh2 = rsphar_vec_libnao(crds, lmax)
      t2 = timer(); tlibnao = (t2-t1); t1 = timer()
      #print( abs(rsh1.T-rsh2).sum(), tpython, tlibnao)
#      print( rsh1[1,:])
#      print( rsh2[1,:])
if __name__ == "__main__": unittest.main()
| gkc1000/pyscf | pyscf/nao/test/test_0031_rsh_vec.py | Python | apache-2.0 | 1,558 | [
"PySCF"
] | 66347d4248547b23cc67a3bbf1ff73d789bf37bd6d94d1b2e24b71dc4659dd29 |
from io import open
from logging import getLogger
from os.path import abspath, dirname, join
from xml.etree import ElementTree
from pulsar.tools.validator import ExpressionValidator
log = getLogger(__name__)
class ToolBox(object):
    """
    Abstraction over a tool config file largely modelled after
    Galaxy's shed_tool_conf.xml. Hopefully over time this toolbox
    schema will be a direct superset of Galaxy's with extensions
    to support simple, non-toolshed based tool setups.
    """

    def __init__(self, path_string):
        self.tool_configs = []
        for config_path in (piece.strip() for piece in path_string.split(",")):
            root = ElementTree.parse(config_path).getroot()
            self.__load_tools_from_els(root, root.get('tool_path'))

    def __load_tools_from_els(self, toolbox_root, tool_path):
        for el in toolbox_root.findall('tool'):
            try:
                # A guid attribute marks a toolshed-installed tool.
                config_cls = ToolShedToolConfig if 'guid' in el.attrib else SimpleToolConfig
                self.tool_configs.append(config_cls(el, tool_path))
            except Exception:
                log.exception('Failed to load tool.')

    def get_tool(self, id):
        """Return the first tool config with the given id; raise KeyError if none."""
        # Need to handle multiple tools per id someday, but
        # starting simple.
        matches = self.__find_tools_by_id(id)
        if not matches:
            raise KeyError("Failed to find tool with id '%s'" % id)
        if len(matches) > 1:
            log.warn("Found multiple tools with id '%s', returning first." % id)
        return matches[0]

    def __find_tools_by_id(self, id):
        return [config for config in self.tool_configs if config.id == id]
class InputsValidator(object):
    """Validates job commands and config files against configured validators.

    Parameters
    ----------
    command_validator : object
        Must expose ``validate(job_directory, contents)``; applied to the
        job's command line.
    config_validators : dict
        Maps config file names to validator objects; files with no entry
        are considered valid.
    """

    def __init__(self, command_validator, config_validators):
        self.command_validator = command_validator
        self.config_validators = config_validators

    def validate_command(self, job_directory, command):
        """Return the command validator's verdict for ``command``."""
        return self.command_validator.validate(job_directory, command)

    def validate_config(self, job_directory, name, path):
        """Validate the config file ``name`` located at ``path``.

        Returns True when no validator is registered for ``name``.
        """
        config_validator = self.config_validators.get(name, None)
        if config_validator is None:
            return True
        # Close the file promptly instead of leaking the handle until GC
        # (the previous implementation never closed it).
        with open(path, "r", encoding="UTF-8") as config_file:
            contents = config_file.read()
        return config_validator.validate(job_directory, contents)
class ToolConfig(object):
    """
    Abstract description of a Galaxy tool.
    """

    def __init__(self):
        super(ToolConfig, self).__init__()

    def get_tool_dir(self):
        """Absolute path of the directory holding this tool's XML file."""
        return abspath(dirname(self.path))

    @property
    def inputs_validator(self):
        """InputsValidator built lazily from the tool XML's <validators> section."""
        try:
            return self._inputs_validator
        except AttributeError:
            pass
        command_validator = ExpressionValidator(
            self._root().find("./validators/command_validator"))
        config_validators = {}
        for config_el in self._root().findall("./validators/configfile_validator"):
            config_validators[config_el.get("name")] = ExpressionValidator(config_el)
        self._inputs_validator = InputsValidator(command_validator, config_validators)
        return self._inputs_validator

    def _root(self):
        return self._el().getroot()

    def _el(self):
        # Re-parse on every access; callers are expected to be infrequent.
        return ElementTree.parse(self.path)
class SimpleToolConfig(ToolConfig):
    """
    Galaxy tool loaded from a toolbox `tool` tag without a guid, i.e. one
    not installed from the toolshed.
    """

    def __init__(self, tool_el, tool_path):
        super(SimpleToolConfig, self).__init__()
        rel_path = tool_el.get('file')
        assert tool_path, "tool_path not set, toolbox XML files must be configured with a tool_path directory."
        assert rel_path, "file not set on tool, each tool element must define a file attribute pointing to a valid tool XML file."
        self.path = join(tool_path, rel_path)
        self.tool_dir = dirname(self.path)
        # Pull id/version out of the tool's own XML file.
        root = self._root()
        self.id = root.get('id')
        self.version = root.get('version', '1.0.0')
class ToolShedToolConfig(SimpleToolConfig):
    """
    Galaxy tool loaded from a toolshed-installed `tool` tag (carries a guid).

    ::

        <tool file="../shed_tools/gvk.bx.psu.edu/repos/test/column_maker/f06aa1bf1e8a/column_maker/column_maker.xml" guid\
        ="gvk.bx.psu.edu:9009/repos/test/column_maker/Add_a_column1/1.1.0">
            <tool_shed>gvk.bx.psu.edu:9009</tool_shed>
            <repository_name>column_maker</repository_name>
            <repository_owner>test</repository_owner>
            <installed_changeset_revision>f06aa1bf1e8a</installed_changeset_revision
            <id>gvk.bx.psu.edu:9009/repos/test/column_maker/Add_a_column1/1.1.0</id>
            <version>1.1.0</version>
        </tool>
    """

    def __init__(self, tool_el, tool_path):
        super(ToolShedToolConfig, self).__init__(tool_el, tool_path)
        self.guid = tool_el.get("guid")
        # Tool shed tools are identified by their GUID, not the id declared
        # inside the tool XML file itself.
        self.id = self.guid
| natefoo/pulsar | pulsar/tools/toolbox.py | Python | apache-2.0 | 5,322 | [
"Galaxy"
] | a423b759ad57002d18d55eb2c2e39e9952c31d53db13571a00671a715897a673 |
"""
====================================================
How to convert 3D electrode positions to a 2D image.
====================================================
Sometimes we want to convert a 3D representation of electrodes into a 2D
image. For example, if we are using electrocorticography it is common to
create scatterplots on top of a brain, with each point representing an
electrode.
In this example, we'll show two ways of doing this in MNE-Python. First,
if we have the 3D locations of each electrode then we can use Mayavi to
take a snapshot of a view of the brain. If we do not have these 3D locations,
and only have a 2D image of the electrodes on the brain, we can use the
:class:`mne.viz.ClickableImage` class to choose our own electrode positions
on the image.
"""
# Authors: Christopher Holdgraf <choldgraf@berkeley.edu>
#
# License: BSD (3-clause)
from scipy.io import loadmat
import numpy as np
from mayavi import mlab
from matplotlib import pyplot as plt
from os import path as op
import mne
from mne.viz import ClickableImage # noqa
from mne.viz import plot_alignment, snapshot_brain_montage
print(__doc__)
# Locations of the MNE sample/misc datasets used by this example (downloaded
# on first use by the data_path() helpers).
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
path_data = mne.datasets.misc.data_path() + '/ecog/sample_ecog.mat'
# We've already clicked and exported
layout_path = op.join(op.dirname(mne.__file__), 'data', 'image')
layout_name = 'custom_layout.lout'
###############################################################################
# Load data
# ---------
#
# First we'll load a sample ECoG dataset which we'll use for generating
# a 2D snapshot.
# Load channel names and 3D electrode positions from the sample .mat file,
# then build a digital montage + Info object describing the ECoG recording.
mat = loadmat(path_data)
ch_names = mat['ch_names'].tolist()
elec = mat['elec']  # electrode positions, one row per channel
dig_ch_pos = dict(zip(ch_names, elec))
mon = mne.channels.DigMontage(dig_ch_pos=dig_ch_pos)
info = mne.create_info(ch_names, 1000., 'ecog', montage=mon)
print('Created %s channel positions' % len(ch_names))
###############################################################################
# Project 3D electrodes to a 2D snapshot
# --------------------------------------
#
# Because we have the 3D location of each electrode, we can use the
# :func:`mne.viz.snapshot_brain_montage` function to return a 2D image along
# with the electrode positions on that image. We use this in conjunction with
# :func:`mne.viz.plot_alignment`, which visualizes electrode positions.
# Render the electrodes on the pial surface, grab a 2D snapshot of the view,
# and scatter the projected electrode positions on top of it.
fig = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
                     surfaces=['pial'], meg=False)
mlab.view(200, 70)
xy, im = snapshot_brain_montage(fig, mon)

# Convert from a dictionary to array to plot.  np.vstack requires a sequence
# of arrays; passing a generator expression is deprecated and rejected by
# modern NumPy, so build an explicit list.
xy_pts = np.vstack([xy[ch] for ch in info['ch_names']])

# Define an arbitrary "activity" pattern for viz
activity = np.linspace(100, 200, xy_pts.shape[0])

# This allows us to use matplotlib to create arbitrary 2d scatterplots
fig2, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
ax.scatter(*xy_pts.T, c=activity, s=200, cmap='coolwarm')
ax.set_axis_off()
# fig2.savefig('./brain.png', bbox_inches='tight') # For ClickableImage
###############################################################################
# Manually creating 2D electrode positions
# ----------------------------------------
#
# If we don't have the 3D electrode positions then we can still create a
# 2D representation of the electrodes. Assuming that you can see the electrodes
# on the 2D image, we can use :class:`mne.viz.ClickableImage` to open the image
# interactively. You can click points on the image and the x/y coordinate will
# be stored.
#
# We'll open an image file, then use ClickableImage to
# return 2D locations of mouse clicks (or load a file already created).
# Then, we'll return these xy positions as a layout for use with plotting topo
# maps.
# This code opens the image so you can click on it. Commented out
# because we've stored the clicks as a layout file already.
# # The click coordinates are stored as a list of tuples
# im = plt.imread('./brain.png')
# click = ClickableImage(im)
# click.plot_clicks()
# # Generate a layout from our clicks and normalize by the image
# print('Generating and saving layout...')
# lt = click.to_layout()
# lt.save(op.join(layout_path, layout_name))  # To save if we want
# # We've already got the layout, load it
lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False)
# Layout positions are scaled by the image dimensions to recover pixel
# coordinates -- presumably lt.pos is normalized to [0, 1]; verify against
# the layout file if reusing this with other layouts.
x = lt.pos[:, 0] * float(im.shape[1])
y = (1 - lt.pos[:, 1]) * float(im.shape[0])  # Flip the y-position
fig, ax = plt.subplots()
ax.imshow(im)
ax.scatter(x, y, s=120, color='r')
plt.autoscale(tight=True)
ax.set_axis_off()
plt.show()
| mne-tools/mne-tools.github.io | 0.15/_downloads/plot_3d_to_2d.py | Python | bsd-3-clause | 4,539 | [
"Mayavi"
] | 92285373db387102cb9ab03f139f4a461098fc55aec7e1ef4d5dac6f8b94ae7e |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.simulation.neuron.core.neuronsimulationenvironment import NEURONEnvironment
from morphforgecontrib.simulation.channels.neuroml_via_neurounits.neuroml_via_neurounits_core import NeuroML_Via_NeuroUnits_Channel
from neurounits.importers.neuroml import ChannelMLReader
from morphforgecontrib.simulation.channels.neurounits.neuro_units_bridge import Neuron_NeuroUnitEqnsetMechanism
class NeuroML_Via_NeuroUnits_ChannelNEURON(Neuron_NeuroUnitEqnsetMechanism, NeuroML_Via_NeuroUnits_Channel):
    """NEURON-simulator backend for a channel described in a NeuroML
    (ChannelML) file, translated into a NeuroUnits equation set."""

    def __init__(self, xml_filename, chlname=None,**kwargs):
        # Build the equation set, channel info and default parameters from
        # the ChannelML XML file (chlinfo is currently unused here).
        (eqnset, chlinfo, default_params) = ChannelMLReader.BuildEqnset(xml_filename)
        # Convert NeuroUnits quantities into the 'quantities' objects the
        # mechanism base class expects (Python 2 style dict iteration).
        default_params = dict([(k, v.as_quantities_quantity()) for (k, v) in default_params.iteritems()])
        super(NeuroML_Via_NeuroUnits_ChannelNEURON,self).__init__(eqnset=eqnset, default_parameters=default_params, recordables_map=None, recordables_data=None, xml_filename=xml_filename, chlname=chlname, **kwargs)

# Register this class as the NEURON implementation of the generic
# NeuroML-via-NeuroUnits channel type.
NEURONEnvironment.channels.register_plugin(NeuroML_Via_NeuroUnits_Channel, NeuroML_Via_NeuroUnits_ChannelNEURON)
| mikehulluk/morphforge | src/morphforgecontrib/simulation/channels/neuroml_via_neurounits/neuroml_via_neurounits_neuron.py | Python | bsd-2-clause | 2,634 | [
"NEURON"
] | b8d686830879042c6a349a26b6008d88783088c1471393f0c7d543dc51b69b9d |
#!/usr/bin/env python
import unittest
import numpy as np
import pyscf.pbc.mp.kmp2
from pyscf.pbc.mp.kmp2 import get_nocc, get_nmo, get_frozen_mask
class fake_mp:
    """Minimal stand-in for a k-point MP2 object carrying only the
    attributes inspected by get_nocc/get_nmo."""

    # Borrow the module-level helper functions so they act as bound methods.
    get_nocc = get_nocc
    get_nmo = get_nmo

    def __init__(self, frozen, mo_occ, nkpts):
        self.frozen = frozen    # frozen-orbital specification under test
        self.mo_occ = mo_occ    # per-k-point occupation arrays
        self.nkpts = nkpts      # number of k-points
        # Caches consulted by get_nocc/get_nmo before recomputing.
        self._nocc = None
        self._nmo = None
class KnownValues(unittest.TestCase):
    """Tests for frozen-orbital counting (get_nocc/get_nmo) in k-point MP2.

    Fix: the original class defined ``test_frozen_kpt_list3`` twice with
    byte-identical bodies; the second definition silently shadowed the
    first, so only one copy ever ran.  The duplicate has been removed.
    """

    def test_no_frozen(self):
        """No frozen orbitals: counts come straight from mo_occ."""
        mp = fake_mp(frozen=None, mo_occ=[np.array([2, 2, 2, 0, 0]),], nkpts=1)
        nocc = get_nocc(mp)
        nmo = get_nmo(mp)
        self.assertAlmostEqual(nocc, 3)
        self.assertAlmostEqual(nmo, 5)
        nocc = get_nocc(mp, per_kpoint=True)
        nmo = get_nmo(mp, per_kpoint=True)
        self.assertListEqual(nocc, [3])
        self.assertListEqual(nmo, [5])

    def test_frozen_int(self):
        """An integer freezes that many core orbitals at every k-point."""
        mp = fake_mp(frozen=1, mo_occ=[np.array([2, 2, 2, 0, 0]), np.array([2, 2, 0, 0, 0])], nkpts=2)
        nocc = get_nocc(mp)
        nmo = get_nmo(mp)
        self.assertAlmostEqual(nocc, 2)
        self.assertAlmostEqual(nmo, 5)  # 2 occupied, 3 virtual
        nocc = get_nocc(mp, per_kpoint=True)
        nmo = get_nmo(mp, per_kpoint=True)
        self.assertListEqual(nocc, [2, 1])
        self.assertListEqual(nmo, [4, 4])

    def test_frozen_list1(self):
        """A flat list of orbital indices is applied at every k-point."""
        mp = fake_mp(frozen=[1,], mo_occ=[np.array([2, 2, 2, 0, 0]), np.array([2, 2, 0, 0, 0])], nkpts=2)
        nocc = get_nocc(mp)
        nmo = get_nmo(mp)
        self.assertAlmostEqual(nocc, 2)
        self.assertAlmostEqual(nmo, 5)  # 2 occupied, 3 virtual
        nocc = get_nocc(mp, per_kpoint=True)
        nmo = get_nmo(mp, per_kpoint=True)
        self.assertListEqual(nocc, [2, 1])
        self.assertListEqual(nmo, [4, 4])

    def test_frozen_list2(self):
        """Freezing a virtual orbital not in the occupied set must raise."""
        mp = fake_mp(frozen=[4, 5], mo_occ=[np.array([2, 2, 2, 0, 0]), np.array([2, 2, 0, 0, 0])], nkpts=2)
        self.assertRaises(RuntimeError, get_nocc, mp)
        self.assertRaises(RuntimeError, get_nmo, mp)  # Fails because it pads by calling get_nocc

    def test_frozen_repeated_orbital(self):
        """Repeating an orbital index in the frozen list must raise."""
        mp = fake_mp(frozen=[[1, 1], [0]], mo_occ=[np.array([2, 2, 2, 0, 0]), np.array([2, 2, 0, 0, 0])], nkpts=2)
        self.assertRaises(RuntimeError, get_nocc, mp)
        self.assertRaises(RuntimeError, get_nmo, mp)  # Fails because it pads by calling get_nocc

    def test_frozen_kpt_list1(self):
        """A nested list freezes different orbitals at each k-point."""
        mp = fake_mp(frozen=[[0, 1,], [0]], mo_occ=[np.array([2, 2, 2, 0, 0]), np.array([2, 2, 0, 0, 0])], nkpts=2)
        nocc = get_nocc(mp)
        nmo = get_nmo(mp)
        self.assertAlmostEqual(nocc, 1)
        self.assertAlmostEqual(nmo, 4)  # 1 occupied, 3 virtual
        nocc = get_nocc(mp, per_kpoint=True)
        nmo = get_nmo(mp, per_kpoint=True)
        self.assertListEqual(nocc, [1, 1])
        self.assertListEqual(nmo, [3, 4])

    def test_frozen_kpt_list2(self):
        """Per-k-point frozen lists may be empty for some k-points."""
        mp = fake_mp(frozen=[[0, 1], [], [0]], mo_occ=[np.array([2, 2, 2, 0, 0])] * 3, nkpts=3)
        nocc = get_nocc(mp)
        nmo = get_nmo(mp)
        self.assertAlmostEqual(nocc, 3)
        self.assertAlmostEqual(nmo, 5)  # 2nd k-point has 3 occupied and 2 virtual orbitals
        nocc = get_nocc(mp, per_kpoint=True)
        nmo = get_nmo(mp, per_kpoint=True)
        self.assertListEqual(nocc, [1, 3, 2])
        self.assertListEqual(nmo, [3, 5, 4])

    def test_frozen_kpt_list3(self):
        """Per-k-point frozen lists mixing occupied and virtual indices."""
        mp = fake_mp(frozen=[[0, 1, 3], [3], [0]], mo_occ=[np.array([2, 2, 2, 0, 0])] * 3, nkpts=3)
        nocc = get_nocc(mp)
        nmo = get_nmo(mp)
        self.assertAlmostEqual(nocc, 3)
        self.assertAlmostEqual(nmo, 5)  # 2nd k-point has 3 occupied and 2 virtual orbitals
        nocc = get_nocc(mp, per_kpoint=True)
        nmo = get_nmo(mp, per_kpoint=True)
        self.assertListEqual(nocc, [1, 3, 2])
        self.assertListEqual(nmo, [2, 4, 4])
# Allow the suite to be executed directly (``python test_mask.py``) in
# addition to being collected by a test runner.
if __name__ == '__main__':
    print("Full mask test")
    unittest.main()
| gkc1000/pyscf | pyscf/pbc/mp/test/test_mask.py | Python | apache-2.0 | 4,496 | [
"PySCF"
] | 67f1f3ed0710360b99323e86abf3fa90ecbed47fcbf016f77498c67a1d393d75 |
#!/usr/bin/env python3
# coding:utf-8
"""
This module contains the necessary classes for serialising and querying ORF data.
"""
import os
import sqlite3
import pysam
from sqlalchemy import Column, String, Integer, ForeignKey, CHAR, Index, Float, Boolean
import sqlalchemy.exc
from sqlalchemy.orm import relationship, backref, column_property
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy import select
from ..utilities.dbutils import DBBASE, Inspector, connect
from ..parsers import bed12 # , GFF
from .blast_serializer.query import Query
from ..utilities.log_utils import create_null_logger, check_logger
import pandas as pd
from ..exceptions import InvalidSerialization
import logging
import logging.handlers as logging_handlers
import multiprocessing as mp
import msgpack
import zlib
from ..configuration import DaijinConfiguration, MikadoConfiguration
# This is a serialization class, it must have a ton of attributes ...
# pylint: disable=too-many-instance-attributes
class Orf(DBBASE):
    """
    Serialization class for ORFs derived from BED12 files.

    Each row stores a single open reading frame called on a transcript
    (the "query"), in transcript-relative BED12 coordinates where
    ``thick_start``/``thick_end`` delimit the coding region.
    """

    __tablename__ = "orf"

    # Primary key of the ORF record itself.
    orf_id = Column(Integer, primary_key=True)
    # Link to the transcript the ORF belongs to; not unique because one
    # transcript can carry several ORFs.
    query_id = Column(Integer, ForeignKey(Query.query_id), unique=False)
    start = Column(Integer, nullable=False)
    end = Column(Integer, nullable=False)
    orf_name = Column(String(200))
    strand = Column(CHAR)
    # CDS boundaries within the transcript (BED12 "thick" coordinates).
    thick_start = Column(Integer, nullable=False)
    thick_end = Column(Integer, nullable=False)
    score = Column(Float)
    has_start_codon = Column(Boolean, nullable=True)
    has_stop_codon = Column(Boolean, nullable=True)
    cds_len = Column(Integer)
    phase = Column(Integer, nullable=False)

    # Composite index to speed up lookups by transcript + CDS span.
    __table_args__ = (Index("orf_index", "query_id", "thick_start", "thick_end"),
                      Index("query_index", "query_id"))

    # Eagerly joined relationship back to the Query row.
    query_object = relationship(Query, uselist=False,
                                backref=backref("orfs"), lazy="joined", innerjoin=True)

    # Scalar convenience attribute: the transcript name, resolved through a
    # correlated sub-select (legacy SQLAlchemy 1.x list-style ``select()``).
    query = column_property(select([Query.query_name]).where(
        Query.query_id == query_id))

    def __init__(self, bed12_object, query_id):
        """Populate the row from a parsed BED12 record.

        :param bed12_object: the BED12 line describing the ORF.
        :type bed12_object: bed12.BED12
        :param query_id: database identifier of the associated transcript.
        :raises TypeError: if ``bed12_object`` is not a BED12 instance.
        """
        if not isinstance(bed12_object, bed12.BED12):
            raise TypeError("Invalid data type!")
        self.query_id = query_id
        self.start = bed12_object.start
        self.end = bed12_object.end
        self.thick_start = bed12_object.thick_start
        self.thick_end = bed12_object.thick_end
        self.orf_name = bed12_object.name
        self.strand = bed12_object.strand
        self.score = bed12_object.score
        self.has_start_codon = bed12_object.has_start_codon
        self.has_stop_codon = bed12_object.has_stop_codon
        self.cds_len = bed12_object.cds_len
        self.phase = bed12_object.phase

    def __str__(self):
        """Return a minimal tab-separated representation (name, start, end)."""
        return "{chrom}\t{start}\t{end}".format(
            chrom=self.query,
            start=self.start,
            end=self.end
        )

    def as_dict(self):
        """Return the ORF fields as a plain dictionary (no DB identifiers)."""
        return {
            "start": self.start,
            "end": self.end,
            "orf_name": self.orf_name,
            "strand": self.strand,
            "thick_start": self.thick_start,
            "thick_end": self.thick_end,
            "score": self.score,
            "has_start_codon": self.has_start_codon,
            "has_stop_codon": self.has_stop_codon,
            "cds_len": self.cds_len,
            "phase": self.phase
        }

    @staticmethod
    def create_dict(bed12_object, query_id):
        """Build the bulk-insert dictionary for a BED12 record.

        :param bed12_object: the BED12 record to serialise.
        :param query_id: database identifier of the associated transcript.
        :raises ValueError: if a non-header record lacks a start coordinate.
        """
        if bed12_object.header is False and bed12_object.start is None:
            raise ValueError("Invalid BED! {}".format(bed12_object))
        obj = bed12_object.as_simple_dict()
        obj["query_id"] = query_id
        return obj

    @classmethod
    def as_bed12_static(cls, state, query_name):
        """Class method to transform the mapper into a BED12 object.
        Usable from outside the class.

        :param state: the original state derived from the mapping.
        :param query_name: the name of the query, retrieved from the Query associated object/table.
        """
        __bed12 = bed12.BED12()

        __bed12.header = False
        __bed12.query = __bed12.chrom = query_name
        __bed12.start = state.start
        __bed12.end = state.end
        __bed12.name = state.orf_name
        __bed12.score = state.score
        __bed12.strand = state.strand
        __bed12.thick_start = state.thick_start
        __bed12.thick_end = state.thick_end
        __bed12.rgb = 0
        __bed12.block_count = 1
        __bed12.block_sizes = [state.end]
        __bed12.block_starts = [0]
        __bed12.transcriptomic = True
        __bed12.phase = state.phase

        # Verbose block, but it is necessary as raw extraction from SQL
        # yields 0/1 instead of True/False
        if state.has_start_codon:
            __bed12.has_start_codon = True
        else:
            __bed12.has_start_codon = False
        if state.has_stop_codon:
            __bed12.has_stop_codon = True
        else:
            __bed12.has_stop_codon = False
        return __bed12

    def as_bed12(self):
        """Method to transform the mapper into a BED12 object."""
        return self.as_bed12_static(self, self.query)
def line_parser_func(handle, fai, send_queue):
    """Producer process for the multi-process ORF serialiser.

    Reads ``handle`` line by line and pushes ``(line_number, line, sequence)``
    tuples onto ``send_queue``.  ``sequence`` is the zlib-compressed transcript
    sequence when the transcript is found in the FASTA index, ``None``
    otherwise (comment lines always carry ``None``).  A final ``"EXIT"``
    sentinel signals consumers to stop.

    :param handle: path to the BED/GFF file to read.
    :param fai: path to the FASTA file; it is reopened here because pysam
        handles cannot be shared across processes.
    :param send_queue: multiprocessing queue consumed by the parser workers.
    """
    fai = pysam.FastaFile(fai)
    # Use a context manager so the file is closed even on error
    # (the original left the handle dangling).
    with open(handle) as source:
        for num, line in enumerate(source):
            # startswith avoids the IndexError that ``line[0]`` would raise
            # on an empty line.
            if line.startswith("#"):
                send_queue.put((num, line, None))
            else:
                # Split once and reuse the name (the original re-split the
                # same line to fetch the sequence).
                name = line.split("\t", 1)[0]
                if name not in fai:
                    seq = None
                else:
                    seq = zlib.compress(fai[name].encode(), 1)
                send_queue.put_nowait((num, line, seq))
    send_queue.put("EXIT")
class OrfSerializer:
    """
    This class has the purpose of automating the loading of ORF information
    into the SQL database.  Parsing can run either in-process or through a
    pool of worker processes, depending on the configuration.
    """

    logger = create_null_logger("__orf_serializer__")

    def __init__(self,
                 handle,
                 configuration=None,
                 logger=None):
        """Constructor function. Arguments:

        - handle         the BED12 file
        - configuration  the Mikado configuration, which provides the output
          DB, the FASTA index of the transcripts, and the serialisation
          parameters (max objects per batch, codon table, threads, ...)

        It is HIGHLY RECOMMENDED to provide the fasta index,
        as it will make the population of the Query table much faster.

        :param handle: the input BED12 file
        :type handle: (io.TextIOWrapper|str)

        :param configuration: a configuration object
        :type configuration: (MikadoConfiguration|DaijinConfiguration)
        """

        if logger is not None:
            self.logger = check_logger(logger)

        fasta_index = configuration.serialise.files.transcripts
        self.logger.debug("Serialising ORFs for transcripts in %s", fasta_index)
        self._max_regression = configuration.serialise.max_regression
        self._table = configuration.serialise.codon_table
        self.procs = configuration.threads
        self.single_thread = configuration.serialise.single_thread
        self.adjust_start = configuration.serialise.start_adjustment
        if self.single_thread:
            self.procs = 1

        if isinstance(fasta_index, str):
            assert os.path.exists(fasta_index)
            fasta_index = pysam.FastaFile(fasta_index)
        elif isinstance(fasta_index, bytes):
            fasta_index = fasta_index.decode()
            assert os.path.exists(fasta_index)
            fasta_index = pysam.FastaFile(fasta_index)
        elif fasta_index is None or not isinstance(fasta_index, pysam.FastaFile):
            exc = ValueError("A fasta index is needed for the serialization!")
            self.logger.exception(exc)
            # BUGFIX: previously this was a bare ``return``, which left a
            # half-initialised object that would fail later with an
            # unrelated AttributeError.  Fail loudly at the source instead.
            raise exc

        self.fasta_index = fasta_index
        if isinstance(handle, str):
            self.is_bed12 = (".bed12" in handle or ".bed" in handle)
        else:
            # BUGFIX: the original tested ``".bed" in handle.name.endswith``,
            # i.e. membership in a *bound method object*, which raises
            # TypeError at runtime.  Test the file name itself.
            self.is_bed12 = (".bed12" in handle.name or ".bed" in handle.name)

        self.engine = connect(configuration, logger)
        self._handle = handle

        Session = sessionmaker(bind=self.engine, autocommit=False, autoflush=False, expire_on_commit=False)
        session = Session()
        inspector = Inspector.from_engine(self.engine)
        if Orf.__tablename__ not in inspector.get_table_names():
            DBBASE.metadata.create_all(self.engine)
        self.session = session
        self.maxobjects = configuration.serialise.max_objects
        self.log_level = configuration.log_settings.log_level

    def load_fasta(self):
        """
        Private method to load data from the FASTA file into the database.

        Only transcripts that are not yet present in the ``query`` table are
        inserted, in batches of ``self.maxobjects``.
        """

        objects = []
        # In-memory cache of the query table: name -> query_id.
        cache = pd.read_sql_table("query", self.engine, index_col="query_name", columns=["query_name", "query_id"])
        cache = cache.to_dict()["query_id"]
        assert isinstance(self.fasta_index, pysam.FastaFile)
        done = 0
        # BUGFIX: the original message claimed these entries were "already
        # present in db", while the count is of entries *missing* from it.
        self.logger.debug("%d entries still to be loaded into the db, %d in the index",
                          len([fasta_key for fasta_key in self.fasta_index.references if fasta_key not in cache]),
                          self.fasta_index.nreferences)
        found = set()
        for ref, length in zip(self.fasta_index.references, self.fasta_index.lengths):
            if ref in cache:
                continue
            objects.append({"query_name": ref, "query_length": length})
            # The FASTA index should never contain duplicated names.
            assert ref not in found, ref
            found.add(ref)
            if len(objects) >= self.maxobjects:
                done += len(objects)
                self.engine.execute(Query.__table__.insert(), objects)
                self.session.commit()
                self.logger.debug("Loaded %d transcripts into query table", done)
                objects = []

        done += len(objects)
        if objects:
            # Guard against executing an INSERT with an empty parameter list
            # (consistent with the multi-threaded serialiser).
            self.engine.execute(Query.__table__.insert(), objects)
        self.session.commit()
        self.logger.debug("Finished loading %d transcripts into query table", done)
        return

    def __serialize_single_thread(self):
        """Parse the ORF file in-process and bulk-insert the rows in batches
        of ``self.maxobjects``."""
        self.bed12_parser = bed12.Bed12Parser(self._handle,
                                              fasta_index=self.fasta_index,
                                              logger=self.logger,
                                              is_gff=(not self.is_bed12),
                                              transcriptomic=True,
                                              max_regression=self._max_regression,
                                              table=self._table)
        objects = []
        done = 0
        not_found = set()
        for row in self.bed12_parser:
            if row.header is True:
                continue
            if row.invalid is True:
                self.logger.warning("Invalid entry, reason: %s\n%s",
                                    row.invalid_reason,
                                    row)
                continue
            if row.id in self.query_cache:
                current_query = self.query_cache[row.id]
            elif not self.initial_cache:
                # The query table started empty: register the transcript now.
                current_query = Query(row.id, row.end)
                not_found.add(row.id)
                self.session.add(current_query)
                self.session.commit()
                self.query_cache[current_query.query_name] = current_query.query_id
                current_query = current_query.query_id
            else:
                self.logger.critical(
                    "The provided ORFs do not match the transcripts provided and already present in the database.\
Please check your input files. Rogue ID: %s", row.id)
                raise InvalidSerialization

            obj = Orf.create_dict(row, current_query)
            if obj["start"] is None or not isinstance(obj["start"], int):
                raise ValueError("Invalid object: {}".format(obj))
            # Re-use the dictionary computed above (the original called
            # Orf.create_dict a second time for the same row).
            objects.append(obj)
            if len(objects) >= self.maxobjects:
                done += len(objects)
                self.session.begin(subtransactions=True)
                self.engine.execute(
                    Orf.__table__.insert(),
                    objects
                )
                self.session.commit()
                self.logger.debug("Loaded %d ORFs into the database", done)
                objects = []

        done += len(objects)
        if objects:
            # Guard against executing an INSERT with an empty parameter list
            # (consistent with the multi-threaded serialiser).
            self.engine.execute(
                Orf.__table__.insert(),
                objects
            )
        self.session.commit()
        self.session.close()
        self.logger.info("Finished loading %d ORFs into the database", done)
        # Sanity check: the table must contain exactly what we serialised.
        orfs = pd.read_sql_table("orf", self.engine, index_col="query_id")
        if orfs.shape[0] != done:
            raise ValueError("I should have serialised {} ORFs, but {} are present!".format(done, orfs.shape[0]))

    def __serialize_multiple_threads(self):
        """Parse the ORF file with a pool of worker processes.

        A dedicated reader process (``line_parser_func``) feeds raw lines to
        ``self.procs`` Bed12ParseWrapper workers; each worker returns
        msgpack-encoded records that are matched to Query rows here and bulk
        inserted in batches of ``self.maxobjects``.
        """
        manager = mp.Manager()
        send_queue = manager.Queue(-1)
        return_queue = manager.JoinableQueue(-1)

        # Route log records from the worker processes back to the main logger.
        self.logging_queue = mp.Queue(-1)
        self.logger_queue_handler = logging_handlers.QueueHandler(self.logging_queue)
        self.queue_logger = logging.getLogger("parser")
        self.queue_logger.addHandler(self.logger_queue_handler)
        self.queue_logger.setLevel(self.log_level)
        self.queue_logger.propagate = False
        self.log_writer = logging_handlers.QueueListener(self.logging_queue, self.logger)
        self.log_writer.start()

        line_parser = mp.Process(target=line_parser_func,
                                 args=(self._handle, self.fasta_index.filename, send_queue))
        line_parser.start()

        parsers = [bed12.Bed12ParseWrapper(
            identifier=index,
            rec_queue=send_queue,
            log_queue=self.logging_queue,
            level=self.log_level,
            return_queue=return_queue,
            fasta_index=None,
            is_gff=(not self.is_bed12),
            transcriptomic=True,
            max_regression=self._max_regression,
            table=self._table) for index in range(self.procs)]
        [_.start() for _ in parsers]

        not_found = set()
        done = 0
        objects = []
        procs_done = 0
        while True:
            num = return_queue.get()
            # Each worker signals completion once; stop when all have.
            if num in ("FINISHED", b"FINISHED"):
                procs_done += 1
                if procs_done == self.procs:
                    break
                else:
                    continue
            num, obj = num
            try:
                loaded_obj = msgpack.loads(obj, raw=False)
            except TypeError:
                raise TypeError(obj)
            if loaded_obj["id"] in self.query_cache:
                current_query = self.query_cache[loaded_obj["id"]]
            elif not self.initial_cache:
                current_query = Query(loaded_obj["id"], loaded_obj["end"])
                not_found.add(loaded_obj["id"])
                self.session.add(current_query)
                self.session.commit()
                self.query_cache[current_query.query_name] = current_query.query_id
                current_query = current_query.query_id
            else:
                self.logger.critical(
                    "The provided ORFs do not match the transcripts provided and already present in the database.\
This could be due to having called the ORFs on a FASTA file different from `mikado_prepared.fasta`, the output of \
mikado prepare. If this is the case, please use mikado_prepared.fasta to call the ORFs and then restart \
`mikado serialise` using them as input.")
                raise InvalidSerialization
            loaded_obj["query_id"] = current_query
            objects.append(loaded_obj)
            if len(objects) >= self.maxobjects:
                done += len(objects)
                self.session.begin(subtransactions=True)
                self.engine.execute(
                    Orf.__table__.insert(),
                    objects
                )
                self.session.commit()
                self.logger.debug("Loaded %d ORFs into the database", done)
                objects = []

        [proc.join() for proc in parsers]
        # NOTE(review): the reader process is never joined here; it should
        # have exited after pushing its "EXIT" sentinel — confirm.
        done += len(objects)
        if objects:
            self.engine.execute(
                Orf.__table__.insert(),
                objects
            )
        self.session.commit()
        self.session.close()
        self.logger.info("Finished loading %d ORFs into the database", done)
        manager.shutdown()
        # Sanity check: the table must contain exactly what we serialised.
        orfs = pd.read_sql_table("orf", self.engine, index_col="query_id")
        if orfs.shape[0] != done:
            raise ValueError("I should have serialised {} ORFs, but {} are present!".format(done, orfs.shape[0]))

    def serialize(self):
        """
        This method performs the parsing of the ORF file and the
        loading into the SQL database.
        """

        self.load_fasta()
        # Cache the query table in memory so we do not issue one SELECT
        # per serialised ORF.
        self.query_cache = pd.read_sql_table("query", self.engine, index_col="query_name",
                                             columns=["query_name", "query_id"])
        self.query_cache = self.query_cache.to_dict()["query_id"]
        self.initial_cache = (len(self.query_cache) > 0)

        if self.procs == 1:
            self.__serialize_single_thread()
        else:
            # The original wrapped this call in a no-op try/finally: pass.
            self.__serialize_multiple_threads()

    def __call__(self):
        """
        Alias for serialize
        """

        try:
            # Drop the indices first: bulk inserts are much faster without
            # them, and they are recreated in the ``finally`` below.
            [idx.drop(bind=self.engine) for idx in Orf.__table__.indexes]
        except (sqlalchemy.exc.IntegrityError, sqlite3.IntegrityError):
            self.logger.debug("Corrupt table found, deleting and restarting")
            self.session.query(Orf).delete()
        try:
            self.serialize()
        except (sqlalchemy.exc.IntegrityError, sqlite3.IntegrityError) as exc:
            self.logger.error("DB corrupted, reloading data. Error: %s",
                              exc)
            self.session.query(Query).delete()
            self.session.query(Orf).delete()
            # An InvalidSerialization raised here propagates to the caller.
            self.serialize()
        finally:
            [idx.create(bind=self.engine) for idx in Orf.__table__.indexes]
| lucventurini/mikado | Mikado/serializers/orf.py | Python | lgpl-3.0 | 18,868 | [
"pysam"
] | 2e54b6bc13ca70f6e600632f45ac746ef12b574da4b02311654fad99da890468 |
"""
==================================
Constants (:mod:`scipy.constants`)
==================================
.. module:: scipy.constants
Physical and mathematical constants and units.
Mathematical constants
======================
============ =================================================================
``pi`` Pi
``golden`` Golden ratio
============ =================================================================
Physical constants
==================
============= =================================================================
``c`` speed of light in vacuum
``mu_0`` the magnetic constant :math:`\mu_0`
``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0`
``h`` the Planck constant :math:`h`
``hbar`` :math:`\hbar = h/(2\pi)`
``G`` Newtonian constant of gravitation
``g`` standard acceleration of gravity
``e`` elementary charge
``R`` molar gas constant
``alpha`` fine-structure constant
``N_A`` Avogadro constant
``k`` Boltzmann constant
``sigma`` Stefan-Boltzmann constant :math:`\sigma`
``Wien`` Wien displacement law constant
``Rydberg`` Rydberg constant
``m_e`` electron mass
``m_p`` proton mass
``m_n`` neutron mass
============= =================================================================
Constants database
------------------
In addition to the above variables, :mod:`scipy.constants` also contains the
2010 CODATA recommended values [CODATA2010]_ database containing more physical
constants.
.. autosummary::
:toctree: generated/
value -- Value in physical_constants indexed by key
unit -- Unit in physical_constants indexed by key
precision -- Relative precision in physical_constants indexed by key
find -- Return list of physical_constant keys with a given string
ConstantWarning -- Constant sought not in newest CODATA data set
.. data:: physical_constants
Dictionary of physical constants, of the format
``physical_constants[name] = (value, unit, uncertainty)``.
Available constants:
====================================================================== ====
%(constant_names)s
====================================================================== ====
Units
=====
SI prefixes
-----------
============ =================================================================
``yotta`` :math:`10^{24}`
``zetta`` :math:`10^{21}`
``exa`` :math:`10^{18}`
``peta`` :math:`10^{15}`
``tera`` :math:`10^{12}`
``giga`` :math:`10^{9}`
``mega`` :math:`10^{6}`
``kilo`` :math:`10^{3}`
``hecto`` :math:`10^{2}`
``deka`` :math:`10^{1}`
``deci`` :math:`10^{-1}`
``centi`` :math:`10^{-2}`
``milli`` :math:`10^{-3}`
``micro`` :math:`10^{-6}`
``nano`` :math:`10^{-9}`
``pico`` :math:`10^{-12}`
``femto`` :math:`10^{-15}`
``atto`` :math:`10^{-18}`
``zepto`` :math:`10^{-21}`
============ =================================================================
Binary prefixes
---------------
============ =================================================================
``kibi`` :math:`2^{10}`
``mebi`` :math:`2^{20}`
``gibi`` :math:`2^{30}`
``tebi`` :math:`2^{40}`
``pebi`` :math:`2^{50}`
``exbi`` :math:`2^{60}`
``zebi`` :math:`2^{70}`
``yobi`` :math:`2^{80}`
============ =================================================================
Weight
------
================= ============================================================
``gram`` :math:`10^{-3}` kg
``metric_ton`` :math:`10^{3}` kg
``grain`` one grain in kg
``lb``            one pound (avoirdupois) in kg
``oz`` one ounce in kg
``stone`` one stone in kg
``grain`` one grain in kg
``long_ton`` one long ton in kg
``short_ton`` one short ton in kg
``troy_ounce`` one Troy ounce in kg
``troy_pound`` one Troy pound in kg
``carat`` one carat in kg
``m_u`` atomic mass constant (in kg)
================= ============================================================
Angle
-----
================= ============================================================
``degree`` degree in radians
``arcmin`` arc minute in radians
``arcsec`` arc second in radians
================= ============================================================
Time
----
================= ============================================================
``minute`` one minute in seconds
``hour`` one hour in seconds
``day`` one day in seconds
``week`` one week in seconds
``year`` one year (365 days) in seconds
``Julian_year`` one Julian year (365.25 days) in seconds
================= ============================================================
Length
------
================= ============================================================
``inch`` one inch in meters
``foot`` one foot in meters
``yard`` one yard in meters
``mile`` one mile in meters
``mil`` one mil in meters
``pt`` one point in meters
``survey_foot`` one survey foot in meters
``survey_mile`` one survey mile in meters
``nautical_mile`` one nautical mile in meters
``fermi`` one Fermi in meters
``angstrom`` one Angstrom in meters
``micron`` one micron in meters
``au`` one astronomical unit in meters
``light_year`` one light year in meters
``parsec`` one parsec in meters
================= ============================================================
Pressure
--------
================= ============================================================
``atm`` standard atmosphere in pascals
``bar`` one bar in pascals
``torr`` one torr (mmHg) in pascals
``psi`` one psi in pascals
================= ============================================================
Area
----
================= ============================================================
``hectare`` one hectare in square meters
``acre`` one acre in square meters
================= ============================================================
Volume
------
=================== ========================================================
``liter`` one liter in cubic meters
``gallon`` one gallon (US) in cubic meters
``gallon_imp`` one gallon (UK) in cubic meters
``fluid_ounce`` one fluid ounce (US) in cubic meters
``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
``bbl`` one barrel in cubic meters
=================== ========================================================
Speed
-----
================= ==========================================================
``kmh`` kilometers per hour in meters per second
``mph`` miles per hour in meters per second
``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second
``knot`` one knot in meters per second
================= ==========================================================
Temperature
-----------
===================== =======================================================
``zero_Celsius`` zero of Celsius scale in Kelvin
``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
===================== =======================================================
.. autosummary::
:toctree: generated/
C2K
K2C
F2C
C2F
F2K
K2F
Energy
------
==================== =======================================================
``eV`` one electron volt in Joules
``calorie`` one calorie (thermochemical) in Joules
``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
``erg`` one erg in Joules
``Btu`` one British thermal unit (International Steam Table) in Joules
``Btu_th`` one British thermal unit (thermochemical) in Joules
``ton_TNT`` one ton of TNT in Joules
==================== =======================================================
Power
-----
==================== =======================================================
``hp`` one horsepower in watts
==================== =======================================================
Force
-----
==================== =======================================================
``dyn`` one dyne in newtons
``lbf`` one pound force in newtons
``kgf`` one kilogram force in newtons
==================== =======================================================
Optics
------
.. autosummary::
:toctree: generated/
lambda2nu
nu2lambda
References
==========
.. [CODATA2010] CODATA Recommended Values of the Fundamental
Physical Constants 2010.
http://physics.nist.gov/cuu/Constants/index.html
"""
# Modules contributed by BasSw (wegwerp@gmail.com)
from codata import *
from constants import *
from codata import _obsolete_constants
# Build the "Available constants" table that is interpolated into the module
# docstring above.  Obsolete CODATA entries remain in ``physical_constants``
# for backward compatibility but are hidden from the documentation.
_constant_names = [(_k.lower(), _k, _v)
                   for _k, _v in physical_constants.items()
                   if _k not in _obsolete_constants]
_constant_names = "\n".join(["``%s``%s %s %s" % (_x[1], " "*(66-len(_x[1])),
                                                 _x[2][0], _x[2][1])
                             for _x in sorted(_constant_names)])
__doc__ = __doc__ % dict(constant_names=_constant_names)
del _constant_names

# Export every public name.  BUGFIX: a list comprehension is used instead of
# ``filter(...)`` because on Python 3 ``filter`` returns a lazy iterator,
# which is not a valid ``__all__``; on Python 2 the result is identical.
__all__ = [_s for _s in dir() if not _s.startswith('_')]

from numpy.testing import Tester
test = Tester().test
| teoliphant/scipy | scipy/constants/__init__.py | Python | bsd-3-clause | 9,743 | [
"Avogadro"
] | da84828db0850eb1a265774e2493aa0aeab8f721492f669933593c2df5481c29 |
# Author: Carlos Xavier Hernandez <cxh@stanford.edu>
# Contributors:
# Copyright (c) 2016, Stanford University and the Authors
# All rights reserved.
import numpy as np
from collections import OrderedDict
from six import string_types
from ..featurizer import Featurizer
class FeatureSelector(Featurizer):
    """Concatenates results of multiple feature extraction objects.

    This estimator applies a list of feature_extraction objects then
    concatenates the results. This is useful to combine several feature
    extraction mechanisms into a single transformer.

    Note: Users should consider using `msmbuilder.preprocessing.StandardScaler`
    to normalize their data after combining feature sets.

    Parameters
    ----------
    features : list of (str, msmbuilder.feature_extraction) tuples
        List of feature_extraction objects to be applied to the data.
        The first half of each tuple is the name of the feature_extraction.
    which_feat : list or str
        Either a string or a list of strings of features to include in the
        transformer.
    """

    @property
    def which_feat(self):
        # Names of the currently selected featurizers, in application order.
        return self._which_feat

    @which_feat.setter
    def which_feat(self, value):
        if isinstance(value, string_types):
            value = [value]
        elif isinstance(value, dict):
            raise TypeError('Not a valid feature list')
        # BUGFIX: validate *after* normalisation.  The original put this
        # check in the elif-chain, so a single string matched the first
        # branch and skipped validation entirely — an invalid feature name
        # passed silently and only failed later at transform time.
        if not all(feat in self.feat_list for feat in value):
            raise ValueError('Not a valid feature')
        self._which_feat = list(value)

    def __init__(self, features, which_feat=None):
        self.features = OrderedDict(features)
        self.feat_list = list(self.features)

        # Default to every registered featurizer, in insertion order.
        which_feat = which_feat if which_feat else self.feat_list[:]

        self.which_feat = which_feat

    def partial_transform(self, traj):
        """Featurize an MD trajectory into a vector space.

        Parameters
        ----------
        traj : mdtraj.Trajectory
            A molecular dynamics trajectory to featurize.

        Returns
        -------
        features : np.ndarray, dtype=float, shape=(n_samples, n_features)
            A featurized trajectory is a 2D array of shape
            `(length_of_trajectory x n_features)` where each `features[i]`
            vector is computed by applying the featurization function
            to the `i`th snapshot of the input trajectory.
        """
        # Column-wise concatenation of each selected featurizer's output.
        return np.concatenate([self.features[feat].partial_transform(traj)
                               for feat in self.which_feat], axis=1)

    def describe_features(self, traj):
        """ Return a list of dictionaries describing the features. Follows
        the ordering of featurizers in self.which_feat.

        Parameters
        ----------
        traj : mdtraj.Trajectory
            The trajectory to describe

        Returns
        -------
        feature_descs : list of dict
            Dictionary describing each feature with the following information
            about the atoms participating in each feature
                - resnames: unique names of residues
                - atominds: atom indicies involved in the feature
                - resseqs: unique residue sequence ids (not necessarily
                  0-indexed)
                - resids: unique residue ids (0-indexed)
                - featurizer: featurizer dependent
                - featuregroup: other info for the featurizer
        """
        all_res = []
        for feat in self.which_feat:
            all_res.extend(self.features[feat].describe_features(traj))
        return all_res
class FeatureSlicer(Featurizer):
    """Extracts features (e.g. subsets) from data along the feature dimension.

    Parameters
    ----------
    feat : MSMBuilder Featurizer object, requires an initialized
           MSMBuilder Featurizer object.
    indices : array_like of integer, optional
        If given, extract only these features by index. This corresponds
        to selecting these columns from the feature-trajectories.
    """

    def __init__(self, feat=None, indices=None):
        if feat is None:
            raise ValueError("Please provide a fitted "
                             "featurizer object")
        if indices is None:
            raise ValueError("Please specify indices")
        # BUGFIX: the original chain tested ``isinstance(indices, np.int)``.
        # ``np.int`` was merely an alias of the builtin ``int`` and was
        # removed in NumPy 1.24, so that line raises AttributeError on any
        # modern NumPy.  Accept builtin ints plus any NumPy integer scalar,
        # using a single isinstance call with a tuple of types.
        if not isinstance(indices, (list, np.ndarray, int, np.integer)):
            raise ValueError("Type of indices is neither a list/array "
                             "nor an int.")

        self.feat = feat
        self.indices = np.array(indices)

    def partial_transform(self, traj):
        """Slice a single input array along to select a subset of features.

        Parameters
        ----------
        traj : MDtraj trajectory object.

        Returns
        -------
        sliced_traj : np.ndarray shape=(n_samples, n_feature_subset)
            Slice of traj
        """
        # Featurize first, then keep only the requested columns.
        return self.feat.partial_transform(traj)[:, self.indices]

    def describe_features(self, traj):
        """
        Returns a sliced version of the feature descriptor

        Parameters
        ----------
        traj : MDtraj trajectory object

        Returns
        -------
        list of sliced dictionaries describing each feature.
        """
        features_list = self.feat.describe_features(traj)
        return [features_list[i] for i in self.indices]
| Eigenstate/msmbuilder | msmbuilder/feature_selection/featureselector.py | Python | lgpl-2.1 | 5,426 | [
"MDTraj"
] | 6cd34b02e942eea64d113137c37c3ce9b4e86ce72576a4b90629765403f97f5e |
# -*- coding: utf-8 -*-
"""
Krige CV
--------
Searching for optimal kriging parameters with cross validation
"""
import numpy as np
from pykrige.rk import Krige
from sklearn.model_selection import GridSearchCV
# 2D Kring param opt
def _report(estimator):
    """Print the best score/parameters and selected CV columns of a fitted
    GridSearchCV instance.  Extracted because the 2D and 3D sections below
    previously duplicated this reporting block verbatim."""
    if hasattr(estimator, "best_score_"):
        print("best_score R² = {:.3f}".format(estimator.best_score_))
        print("best_params = ", estimator.best_params_)

    print("\nCV results::")
    if hasattr(estimator, "cv_results_"):
        for key in [
            "mean_test_score",
            "mean_train_score",
            "param_method",
            "param_variogram_model",
        ]:
            print(" - {} : {}".format(key, estimator.cv_results_[key]))


# 2D Kring param opt
param_dict = {
    "method": ["ordinary", "universal"],
    "variogram_model": ["linear", "power", "gaussian", "spherical"],
    # "nlags": [4, 6, 8],
    # "weight": [True, False]
}

estimator = GridSearchCV(Krige(), param_dict, verbose=True, return_train_score=True)

# dummy data (unseeded, so results differ between runs)
X = np.random.randint(0, 400, size=(100, 2)).astype(float)
y = 5 * np.random.rand(100)

# run the gridsearch
estimator.fit(X=X, y=y)
_report(estimator)

# 3D Kring param opt
param_dict3d = {
    "method": ["ordinary3d", "universal3d"],
    "variogram_model": ["linear", "power", "gaussian", "spherical"],
    # "nlags": [4, 6, 8],
    # "weight": [True, False]
}

estimator = GridSearchCV(Krige(), param_dict3d, verbose=True, return_train_score=True)

# dummy data
X3 = np.random.randint(0, 400, size=(100, 3)).astype(float)
y = 5 * np.random.rand(100)

# run the gridsearch
estimator.fit(X=X3, y=y)
_report(estimator)
| bsmurphy/PyKrige | examples/08_krige_cv.py | Python | bsd-3-clause | 1,953 | [
"Gaussian"
] | 3c93a3810beda712ed8a58fc79bf2eb157592778144444bf96166e72a146ff1b |
# Copyright (C) 2017,2018
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************
espressopp.analysis.SubregionTracking
********************************
Class to compute the number of (coarse-grained) particles that belong to a specified particle list and that reside in a specified subregion of the simulation box (when specifying a list of particles that reside in a certain subregion at the beginning of the simulation, the routine can be used, for example, to track how many of these particles still stay in the same region after some simulation time).
Examples:
>>> subregiontracking_instance = espressopp.analysis.SubregionTracking(system, span=0.75, geometry=1, pidlist=tracklist, center=[Lx/2, Ly/2, Lz/2])
>>> # creates instance of the class for calculating number of particles that belong to particle id list tracklist and reside in a subregion which is centered in the simulation box and bounded within +-0.75 in x-direction from the center
>>> number_of_particles = subregiontracking_instance.compute()
>>> # computes the number of particles belonging to specified particle id list in specified subregion of the simulation box
.. function:: espressopp.analysis.SubregionTracking(self, system, span, geometry, center, pidlist)
Constructs the SubregionTracking object.
:param system: system object
:param span: radius of the subregion to be considered
:param geometry: geometry of the subregion. Can only be in ['spherical', 'bounded-x', 'bounded-y', 'bounded-z']
:param center: center of the subregion
:param pidlist: list of particle ids of coarse-grained particles that are counted in the specified subregion
:type system: std::shared_ptr<System>
:type span: real
:type geometry: str in ['spherical', 'bounded-x', 'bounded-y', 'bounded-z']
:type center: list of 3 reals (x,y,z coordinates of center)
:type pidlist: list of ints
.. function:: espressopp.analysis.SubregionTracking.compute():
Calculates the number of particles that are present in specified subregion and that belong to specified particle id list.
:rtype: real
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_SubregionTracking
class SubregionTrackingLocal(ObservableLocal, analysis_SubregionTracking):
    """Worker-side observable that counts how many particles from a given
    id list currently reside inside a subregion of the simulation box."""

    def __init__(self, system, span, geometry, center, pidlist):
        supported_geometries = ('spherical', 'bounded-x', 'bounded-y', 'bounded-z')
        if geometry not in supported_geometries:
            raise ValueError('Error: Geometry must be in ["spherical", "bounded-x", "bounded-y", "bounded-z"]. Your input: {}'.format(geometry))
        # Initialize the C++ backend only on ranks that belong to the
        # active PMI CPU group (or always, when no group is defined).
        if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            geometry_code = {'spherical': 0, 'bounded-x': 1,
                             'bounded-y': 2, 'bounded-z': 3}[geometry]
            cxxinit(self, analysis_SubregionTracking, system, span, geometry_code)
            center_x, center_y, center_z = center[0], center[1], center[2]
            self.cxxclass.setCenter(self, center_x, center_y, center_z)
            for particle_id in pidlist:
                self.cxxclass.addPID(self, particle_id)
if pmi.isController :
    # Controller-side PMI proxy: constructor and method calls are
    # forwarded to SubregionTrackingLocal instances on all workers.
    class SubregionTracking(Observable, metaclass=pmi.Proxy):
        pmiproxydefs = dict(
            cls = 'espressopp.analysis.SubregionTrackingLocal'
        )
| espressopp/espressopp | src/analysis/SubregionTracking.py | Python | gpl-3.0 | 4,280 | [
"ESPResSo"
] | ccd2ce4aa8505e6c247f09fa68f1768b4e4f193a8aaaa8a9d584887798626995 |
import numpy as np
from ase.atoms import Atoms
from ase.calculators.singlepoint import SinglePointCalculator
from ase.data import atomic_numbers
def get_atoms(cmr_data):
    """Build an ase.Atoms object from one CMR data record.

    The record's stored single-point results (energy, forces, magnetic
    moments) are attached via a SinglePointCalculator.

    Raises RuntimeError when the record is only a reference (a plain
    string) or lacks positions/atomic numbers.
    """
    # A plain string here means the entry is only a reference to another
    # db-file, not an actual image.
    if type(cmr_data)==str:
        raise RuntimeError('cmr db-file: the specified cmr group file does not contain any images, only references.\n'+
                           'This error could be caused by an older version of CMR - or a group file containing only references to other db-files.')
    positions = cmr_data.get('ase_positions')
    numbers = cmr_data.get('ase_atomic_numbers')
    symbols = cmr_data.get('ase_chemical_symbols')
    cell = cmr_data.get('ase_cell')
    pbc = cmr_data.get('ase_pbc')
    tags = np.array(cmr_data.get('ase_tags'))
    magmoms = np.array(cmr_data.get('ase_magnetic_moments'))
    energy = cmr_data.get('ase_potential_energy')
    forces = cmr_data.get('ase_forces')
    # Fall back to chemical symbols when atomic numbers are missing.
    if numbers is None and not symbols is None:
        numbers = [atomic_numbers[x] for x in symbols]
    if numbers is None or positions is None:
        raise RuntimeError('cmr db-file: there is no or not enough ase data available in the specified db-file.')
    atoms = Atoms(positions=positions,
                  numbers=numbers,
                  cell=cell,
                  pbc=pbc)
    if tags.any():
        atoms.set_tags(list(tags))
    # NOTE(review): .any() on np.array(None) relies on NumPy's object-array
    # truthiness; presumably the field is either absent or a full list --
    # confirm against the CMR writer.
    if magmoms.any():
        atoms.set_initial_magnetic_moments(magmoms)
    else:
        magmoms = None
    atoms.calc = SinglePointCalculator(energy, forces, None, magmoms,
                                       atoms)
    return atoms
def read_db(filename, index):
    """Read Atoms (or a list of Atoms) selected by *index* from a CMR db-file.

    A group file (one without its own positions) is resolved through its
    member hashes; *index* may be an int or a slice.
    """
    import cmr
    r = cmr.read(filename)
    # has_key() is part of the cmr data object's own API (not a dict).
    if not r.has_key("ase_positions") and r.is_group():
        hashes = r.get_member_hashes()
        # May yield a single hash (int index) or a list of hashes (slice).
        hashes = hashes[index]
        if len(hashes)==0:
            raise RuntimeError('cmr db-file: could not find any group members.\n'+
                               'This error could be caused by an older version of CMR - or a group file containing only references to other db-files.')
        if type(hashes)==list:
            return [get_atoms(r.get_xmldata(hash)) for hash in hashes]
        return get_atoms(r.get_xmldata(hashes))
    else:
        return get_atoms(r)
def write_db(filename, images, **kwargs):
    """Write ASE images to a CMR db-file using cmr.atoms2cmr().

    The cmr package (version > 0.3.2) must be installed; an ImportError
    propagates to the caller if it is missing.
    """
    # The original wrapped this in "try: ... except: raise" followed by an
    # unreachable second raise of an undefined NotAvailable name; the bare
    # except re-raised everything unchanged, so the wrapper was dead code.
    import cmr
    cmr.atoms2cmr(images, **kwargs).write(filename)
| JConwayAWT/PGSS14CC | lib/python/multimetallics/ase/io/cmr_io.py | Python | gpl-2.0 | 2,460 | [
"ASE"
] | 7ffba2c82d1e2bad8bf845d4a1cce8b67a7210f974f5eb8c2555fe10e63f056f |
# fMBT, free Model Based Testing tool
# Copyright (c) 2012, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
"""
eyenfinger - DEPRECATED GUI testing library based on OCR and X event generation
Use fmbtx11 instead.
Configuring low-level key presses
---------------------------------
printEventsFromFile() prints input events from the chosen Linux
/dev/input/eventXX file. Example:
python -c '
import eyenfinger
eyenfinger.printEventsFromFile("/dev/input/event0")
'
Alternatively, you can use device names in /proc/bus/input/devices and
printEventsFromDevice("device name").
Configuring OCR
---------------
autoconfigure() evaluates number of preprocessing filters to give the
best result on finding given words from given image. Example:
python -c '
from eyenfinger import *
autoconfigure("screenshot.png", ["Try", "to", "find", "these", "words"])
'
evaluatePreprocessFilter() highlights words detected on given image. Example:
python -c '
from eyenfinger import *
evaluatePreprocessFilter("screenshot.png", "-sharpen 5 -resize 1600x", ["File", "View"])
'
setPreprocessFilter() sets given filter to be used when reading text from images.
Debugging
---------
iClickWord() capture parameter visualises coordinates to be clicked. Example:
python -c '
from eyenfinger import *
setPreprocessFilter("-sharpen 5 -filter Mitchell -resize 1600x -level 40%,50%,3.0")
iRead(source="screenshot.png")
iClickWord("[initial", clickPos=(-2,3), capture="highlight.png", dryRun=True)
'
"""
import distutils.sysconfig
import time
import subprocess
import re
import math
import html.entities
import sys
import os
import tempfile
import atexit
import shlex
import shutil
import ctypes
import platform
import struct
import warnings
import fmbt_config
def _DEPRECATED():
warnings.warn("eyenfinger.py API is deprecated, use fmbtx11 instead.",
DeprecationWarning, stacklevel=2)
# Default ImageMagick preprocessing applied to screenshots before OCR.
_g_preprocess = "-sharpen 5 -filter Mitchell -resize 1920x1600 -level 40%%,70%%,5.0 -sharpen 5"

# State of the most recent iRead() call: preprocessed image filename,
# original image filename, raw HOCR output and the detected words.
_g_readImage = None
_g_origImage = None
_g_hocr = ""
_g_words = None

# Id of the window that was last read or explicitly selected.
_g_lastWindow = None

# Tesseract page-segmentation-mode option name; iRead() switches this to
# "--psm" at runtime when a newer Tesseract rejects "-psm".
_g_tesseractPSM = "-psm"

# Defaults changeable through the iSetDefault*() functions.
_g_defaultClickDryRun = False
_g_defaultDelayedDrawing = False
_g_defaultIconMatch = 1.0
_g_defaultIconColorMatch = 1.0
_g_defaultIconOpacityLimit = 0.0
_g_defaultInputKeyDevice = None
_g_defaultReadWithOCR = True

# windowsOffsets maps window-id to (x, y) pair.
_g_windowOffsets = {None: (0,0)}
# windowsSizes maps window-id to (width, height) pair.
_g_windowSizes = {None: (0,0)}
# screenSize is a (width, height) pair.
_g_screenSize = (0, 0)

# Per-process temporary directory; removed by _exitHandler at exit.
_g_tempdir = tempfile.mkdtemp(prefix="eyenfinger.%s." % (os.getpid(),))
SCREENSHOT_FILENAME = _g_tempdir + "/screenshot.png"
LOG_FILENAME = _g_tempdir + "/eyenfinger.log"

# Mouse event types accepted by the iClick* functions.
MOUSEEVENT_MOVE, MOUSEEVENT_CLICK, MOUSEEVENT_DOWN, MOUSEEVENT_UP = range(4)
# Xkeys contains key names known to X11, see keysymdef.h.
Xkeys = [
    "BackSpace", "Tab", "Linefeed", "Clear", "Return", "Pause",
    "Scroll_Lock", "Sys_Req", "Escape", "Delete", "Home", "Left",
    "Up", "Right", "Down", "Prior", "Page_Up", "Next", "Page_Down",
    "End", "Begin", "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8",
    "F9", "F10", "F11", "F12", "Shift_L", "Shift_R", "Control_L",
    "Control_R", "Caps_Lock", "Shift_Lock", "Meta_L", "Meta_R",
    "Alt_L", "Alt_R", "space", "exclam", "quotedbl", "numbersign",
    "dollar", "percent", "ampersand", "apostrophe", "quoteright",
    "parenleft", "parenright", "asterisk", "plus", "comma", "minus",
    "period", "slash", "0", "1", "2", "3", "4", "5", "6", "7", "8",
    "9", "colon", "semicolon", "less", "equal", "greater", "question",
    "at", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L",
    "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y",
    "Z", "bracketleft", "backslash", "bracketright", "asciicircum",
    "underscore", "grave", "quoteleft", "a", "b", "c", "d", "e", "f",
    "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s",
    "t", "u", "v", "w", "x", "y", "z", "braceleft", "bar",
    "braceright"]

# InputKeys contains key names known to input devices, see
# linux/input.h or http://www.usb.org/developers/hidpage. The order is
# significant, because keyCode = InputKeys.index(keyName).
InputKeys = [
    "RESERVED", "ESC","1", "2", "3", "4", "5", "6", "7", "8", "9", "0",
    "MINUS", "EQUAL", "BACKSPACE", "TAB",
    "Q", "W", "E", "R", "T", "Y", "U", "I", "O", "P",
    "LEFTBRACE", "RIGHTBRACE", "ENTER", "LEFTCTRL",
    "A", "S", "D", "F", "G", "H", "J", "K", "L",
    "SEMICOLON", "APOSTROPHE", "GRAVE", "LEFTSHIFT", "BACKSLASH",
    "Z", "X", "C", "V", "B", "N", "M",
    "COMMA", "DOT", "SLASH", "RIGHTSHIFT", "KPASTERISK", "LEFTALT",
    "SPACE", "CAPSLOCK",
    "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "F10",
    "NUMLOCK", "SCROLLLOCK",
    "KP7", "KP8", "KP9", "KPMINUS",
    "KP4", "KP5", "KP6", "KPPLUS",
    "KP1", "KP2", "KP3", "KP0", "KPDOT",
    "undefined0",
    "ZENKAKUHANKAKU", "102ND", "F11", "F12", "RO",
    "KATAKANA", "HIRAGANA", "HENKAN", "KATAKANAHIRAGANA", "MUHENKAN",
    "KPJPCOMMA", "KPENTER", "RIGHTCTRL", "KPSLASH", "SYSRQ", "RIGHTALT",
    "LINEFEED", "HOME", "UP", "PAGEUP", "LEFT", "RIGHT", "END", "DOWN",
    "PAGEDOWN", "INSERT", "DELETE", "MACRO",
    "MUTE", "VOLUMEDOWN", "VOLUMEUP",
    "POWER",
    "KPEQUAL", "KPPLUSMINUS", "PAUSE", "SCALE", "KPCOMMA", "HANGEUL",
    "HANGUEL", "HANJA", "YEN", "LEFTMETA", "RIGHTMETA", "COMPOSE"]

# Single-character shorthands accepted in place of full input key names.
_inputKeyShorthands = {
    "-": "MINUS", "=": "EQUAL",
    "[": "LEFTBRACE", "]": "RIGHTBRACE", "\n": "ENTER",
    ";": "SEMICOLON",
    ",": "COMMA", ".": "DOT", "/": "SLASH",
    " ": "SPACE" }

# Python 3 compatibility: "string-like" types for isinstance checks.
basestring = (str, bytes)
class EyenfingerError(Exception):
    """Base class for all errors raised by eyenfinger."""

class BadMatch (EyenfingerError):
    """Raised when a word or icon cannot be matched."""

class BadWindowName (EyenfingerError):
    """Raised when the requested window cannot be found or is undefined."""

class BadSourceImage(EyenfingerError):
    """Raised when the source image is missing or unreadable."""

class BadIconImage(EyenfingerError):
    """Raised when the icon image is missing or unreadable."""

class NoOCRResults(EyenfingerError):
    """Raised when OCR results are required but not available."""
# Logging: route messages through fmbt's adapter log when running under
# fmbt; otherwise append timestamped lines to LOG_FILENAME.
try:
    import fmbt
    def _log(msg):
        fmbt.adapterlog("eyenfinger: %s" % (msg,))
except ImportError:
    def _log(msg):
        open(LOG_FILENAME, "a").write("%13.2f %s\n" %
                                      (time.time(), msg))
# Load the eye4graphics icon-matching C library. The search covers the
# working directory, this module's directory and site-packages.
try:
    _libpath = ["", ".",
                os.path.dirname(os.path.abspath(__file__)),
                distutils.sysconfig.get_python_lib(plat_specific=1)]
    _suffix = ".so"
    if os.name == "nt":
        _suffix = ".dll"
    for _dirname in _libpath:
        try:
            eye4graphics = ctypes.CDLL(os.path.join(_dirname, "eye4graphics"+_suffix))
            break
        except: pass
    else:
        raise ImportError("%s cannot load eye4graphics%s" % (__file__, _suffix))

    class Bbox(ctypes.Structure):
        # C-compatible bounding box; "error" carries the match error value
        # reported by findSingleIcon.
        _fields_ = [("left", ctypes.c_int32),
                    ("top", ctypes.c_int32),
                    ("right", ctypes.c_int32),
                    ("bottom", ctypes.c_int32),
                    ("error", ctypes.c_int32)]
except Exception as e:
    # Icon matching becomes unavailable; OCR-based functions still work.
    Bbox = None
    eye4graphics = None
    _log('Loading icon recognition library failed: "%s".' % (e,))
# See struct input_event in /usr/include/linux/input.h
# The timestamp fields are two native longs, hence the word-size split.
if platform.architecture()[0] == "32bit":
    _InputEventStructSpec = 'IIHHi'
else:
    _InputEventStructSpec = 'QQHHi'

# Event and keycodes are in input.h, too.
_EV_KEY = 0x01

# _inputKeyNameCodeMap is a dictionary keyName -> keyCode
_inputKeyNameCodeMap = {}
for code, name in enumerate(InputKeys):
    _inputKeyNameCodeMap[name] = code
def _inputKeyNameToCode(keyName):
    """Map a key name (or a shorthand such as '.') to its Linux keycode."""
    # .get() with a None check is safe here even for keycode 0 ("RESERVED").
    code = _inputKeyNameCodeMap.get(keyName)
    if code is not None:
        return code
    fullName = _inputKeyShorthands.get(keyName)
    if fullName is not None:
        return _inputKeyNameCodeMap[fullName]
    raise ValueError('Invalid key name "%s"' % (keyName,))
def error(msg, exitstatus=1):
    """Print an error message to stderr and terminate the process.

    Parameters:
      msg         message to be printed
      exitstatus  process exit status, default is 1.
    """
    sys.stderr.write("eyenfinger: %s\n" % (msg,))
    # Bug fix: the exitstatus parameter was previously ignored and the
    # process always exited with status 1.
    sys.exit(exitstatus)
def printEventsFromFile(filename):
    """Read raw input events from an event device file and print them."""
    eventSize = struct.calcsize(_InputEventStructSpec)
    fd = os.open(filename, os.O_RDONLY)
    try:
        while 1:
            rawEvent = os.read(fd, eventSize)
            if not rawEvent:
                break
            tim, tus, typ, cod, val = struct.unpack(_InputEventStructSpec, rawEvent)
            nam = InputKeys[cod] if cod < len(InputKeys) else "N/A"
            print("time: %8s, susc: %8s, type: %8s, keyCode: %5s name: %10s value: %8s" %
                  (tim, tus, typ, cod, nam, val))
    finally:
        os.close(fd)
def printEventsFromDevice(deviceName):
    """Print input events from the input device with the given name."""
    devices = dict(_listInputDevices())
    if deviceName in devices:
        printEventsFromFile(devices[deviceName])
    else:
        error('Unknown device "%s". Available devices: %s' %
              (deviceName, sorted(devices.keys())))
def _exitHandler():
    # Remove the per-process temporary directory (screenshots, log) at exit.
    shutil.rmtree(_g_tempdir, ignore_errors=True)
atexit.register(_exitHandler)
def _runcmd(cmd):
    """Run *cmd* and return (exit_status, stdout).

    cmd is either a shell command string (run through the shell) or an
    argument list (executed directly). Stderr is stored in the module
    global _g_last_runcmd_error for error reporting by callers.
    """
    global _g_last_runcmd_error
    p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    output, _g_last_runcmd_error = stdout.decode("utf-8"), stderr.decode("utf-8")
    exit_status = p.returncode
    if exit_status != 0:
        # Log the full context of a failed command for debugging.
        _log("runcmd: %s" % (cmd,))
        _log("exit status: " + str(exit_status))
        _log("stdout: " + output)
        _log("stderr: " + _g_last_runcmd_error)
    return exit_status, output
def _runDrawCmd(inputfilename, cmd, outputfilename):
    """Draw on a screenshot with ImageMagick, immediately or delayed.

    With delayed drawing (see iSetDefaultDelayedDrawing) the draw command
    is appended to <outputfilename>.delayeddraw for later execution
    instead of being run now; (0, "") is returned in that case.
    """
    if not _g_defaultDelayedDrawing:
        return _runcmd([fmbt_config.imagemagick_convert,
                        inputfilename] + cmd + [outputfilename])
    # Do delayed drawing to save test execution time. If the output
    # file does not exist, just copy inputfile to outputfile and start
    # logging delayed draw commands to
    # outputfile.delayeddraw. Otherwise append latest command to
    # outputfile.delayeddraw.
    delayedCmd = '%s "%s" "%s" "%s"\n' % (
        fmbt_config.imagemagick_convert,
        outputfilename, '%s' % ('" "'.join(cmd)), outputfilename)
    delayedDrawFilename = outputfilename + ".delayeddraw"
    try:
        if os.access(outputfilename, os.R_OK) == False:
            shutil.copy(inputfilename, outputfilename)
            open(delayedDrawFilename, "w").write(delayedCmd)
        else:
            open(delayedDrawFilename, "a").write(delayedCmd)
    except:
        _log("error on delayed drawing: %s" % (delayedCmd,))
        raise
    _log("delayed drawing: %s" % (delayedCmd,))
    return (0, "")
def _safeForShell(s):
# convert all non-ascii and bad chars to _
try: s = str(s, "utf-8")
except: pass
return ''.join([(c, "_")[ord(c)>128 or c in "'\"\\`"] for c in s])
def _coordsToInt(xy_coords, wh=None):
"""
Convert percentages to screen coordinates
"""
x, y = xy_coords
if wh is None:
width, height = None, None
else:
width, height = wh
if (width == None or height == None):
width, height = screenSize()
if 0.0 <= x <= 1.0 and type(x) == float:
x = int(round(x * width))
else:
x = int(x)
if 0.0 <= y <= 1.0 and type(y) == float:
y = int(round(y * height))
else:
y = int(y)
return (x, y)
def setPreprocessFilter(preprocess):
    """Set the default ImageMagick preprocessing options used before OCR."""
    global _g_preprocess
    _g_preprocess = preprocess
def iSetDefaultClickDryRun(dryRun):
    """
    Set the default value for optional dryRun parameter for iClick*
    functions.
    """
    # dryRun: bool -- True suppresses synthesized input events.
    global _g_defaultClickDryRun
    _g_defaultClickDryRun = dryRun
def iSetDefaultDelayedDrawing(delayedDrawing):
    """
    Set the default for delaying drawing operations on captured
    screenshots.

    If delayedDrawing == False, drawing actions on screenshots (like
    highlighting icon and clicked coordinates) takes place during the
    function execution (like iClickIcon).

    If delayedDrawing == True, the screenshot is saved without
    highlighted areas, and <screenshot filename>.delayeddraw file
    contains all draw commands that can be executed after the test
    run. This may save a lot test execution time and CPU on the device
    that runs eyenfinger.

    The default is False.
    """
    # See _runDrawCmd for how the .delayeddraw file is written.
    global _g_defaultDelayedDrawing
    _g_defaultDelayedDrawing = delayedDrawing
def iSetDefaultIconMatch(match):
    """
    Set the default icon matching value, ranging from 0 to 1. The
    value will be used in iClickIcon and iVerifyIcon, if the optional
    match parameter is omitted. Value 1.0 will use pixel-perfect
    matching (the default), values below 1.0 will use fuzzy matching.

    Fuzzy matching is EXPERIMENTAL.
    """
    # match: float in [0, 1].
    global _g_defaultIconMatch
    _g_defaultIconMatch = match
def iSetDefaultIconColorMatch(colorMatch):
    """
    Set the default color matching value, ranging from 0 to 1. When
    using pixel-perfect matching this will allow given error in pixel
    colors.

    For instance, when comparing 24 bit RGB images, value 0.97 will
    allow 256 - int(256 * .97) = 8 difference on each color channel.
    """
    # colorMatch: float in [0, 1].
    global _g_defaultIconColorMatch
    _g_defaultIconColorMatch = colorMatch
def iSetDefaultIconOpacityLimit(opacityLimit):
    """
    Set the default minimum opacity for pixels to be matched. Defaults
    to 0.0, all pixels are matched independently of their opacity.
    """
    # opacityLimit: float in [0, 1].
    global _g_defaultIconOpacityLimit
    _g_defaultIconOpacityLimit = opacityLimit
def iSetDefaultInputKeyDevice(deviceName):
    """
    Use deviceName as a default input device for iInputKey.

    iSetDefaultInputKeyDevice("/dev/input/event0")
    iInputKey(["enter"])
    """
    # deviceName: path to a /dev/input/eventXX device file.
    global _g_defaultInputKeyDevice
    _g_defaultInputKeyDevice = deviceName
def iSetDefaultReadWithOCR(ocr):
    """
    Set the default for using OCR when reading images or windows.
    """
    # ocr: bool -- used by iRead when its ocr parameter is None.
    global _g_defaultReadWithOCR
    _g_defaultReadWithOCR = ocr
def screenSize():
    """
    Returns the size of the screen as a pair (width, height).
    """
    # Lazily detected on first call and cached in _g_screenSize.
    if _g_screenSize == (0, 0):
        _getScreenSize()
    return _g_screenSize
def windowSize():
    """
    Returns the size of the window as a pair (width, height).

    Choose a window first, for instance with iRead() or iUseWindow().
    Raises BadWindowName if no window has been selected.
    """
    if _g_lastWindow == None:
        raise BadWindowName("undefined window")
    return _g_windowSizes[_g_lastWindow]
def windowXY():
    """
    Returns screen coordinates of the top-left corner of the window as
    a pair (x, y).

    Choose a window first, for instance with iRead() or iUseWindow().
    Raises BadWindowName if no window has been selected.
    """
    if _g_lastWindow == None:
        raise BadWindowName("undefined window")
    return _g_windowOffsets[_g_lastWindow]
def imageSize(imageFilename):
    """
    Returns image size as pair (width, height), or (None, None) when
    the image cannot be opened.
    """
    struct_bbox = Bbox(0,0,0,0,0)
    # The eye4graphics C library expects a byte-string filename.
    # NOTE(review): this fails with TypeError if imageFilename is already
    # bytes -- presumably callers always pass str; confirm.
    imageFilename = bytes(imageFilename, encoding='utf-8')
    err = eye4graphics.imageDimensions(ctypes.byref(struct_bbox),
                                       imageFilename)
    if err != 0:
        return None, None
    return struct_bbox.right, struct_bbox.bottom
def iRead(windowId = None, source = None, preprocess = None, ocr=None, capture=None, ocrArea=(0, 0, 1.0, 1.0), ocrPageSegModes=(3,), lang="eng", configfile=None):
    """
    DEPRECATED - use fmbtx11.Screen.refreshScreenshot instead.

    Read the contents of the given window or other source. If neither
    of windowId or source is given, reads the contents of active
    window. iClickWord and iVerifyWord can be used after reading with
    OCR.

    Parameters:
        windowId     id (0x....) or the title of the window to be read.
                     Defaults to None.
        source       name of the file to be read, for instance a screen
                     capture. Defaults to None.
        preprocess   preprocess specification to override the default
                     that is set using setPreprocessFilter. Defaults
                     to None. Set to "" to disable preprocessing before
                     OCR.
        ocr          words will be read using OCR if True
                     (the default). Read object can be used with
                     iClickIcon and iVerifyIcon without OCR, too.
        capture      save image with read words highlighted to this
                     file. Default: None (nothing is saved).
        ocrArea      (top, left, right, bottom) coordinates -
                     area of the image to be read with OCR.
        ocrPageSegModes
                     tuple of integers, see tesseract -pagesegmodes
        lang         Tesseract language setting, the default is "eng".
                     Refer to LANGUAGES in Tesseract documentation or
                     man page.
        configfile   Tesseract configuration file.

    Returns list of words detected by OCR from the read object.
    """
    global _g_hocr
    global _g_lastWindow
    global _g_words
    global _g_readImage
    global _g_origImage
    global _g_tesseractPSM
    # Reset the state of the previous read.
    _g_words = None
    _g_readImage = None
    _g_origImage = None
    if ocr == None:
        ocr = _g_defaultReadWithOCR
    if not source:
        iUseWindow(windowId)
        # take a screenshot
        import fmbtx11
        fmbtx11.Screen().refreshScreenshot().save(SCREENSHOT_FILENAME + ".png")
        # Crop the full-screen capture to the selected window.
        _runcmd("%s %s.png -crop %sx%s+%s+%s +repage '%s'" %
                (fmbt_config.imagemagick_convert, SCREENSHOT_FILENAME,
                 _g_windowSizes[_g_lastWindow][0], _g_windowSizes[_g_lastWindow][1],
                 _g_windowOffsets[_g_lastWindow][0], _g_windowOffsets[_g_lastWindow][1],
                 SCREENSHOT_FILENAME))
        source = SCREENSHOT_FILENAME
    else:
        iUseImageAsWindow(source)
    _g_origImage = source
    orig_width, orig_height = _g_windowSizes[_g_lastWindow][0], _g_windowSizes[_g_lastWindow][1]
    # Resolve the OCR area (possibly fractional) into absolute pixels.
    x1, y1 = _coordsToInt(ocrArea[:2], (orig_width, orig_height))
    x2, y2 = _coordsToInt(ocrArea[2:], (orig_width, orig_height))
    if x2 <= x1 or y2 <= y1:
        raise EyenfingerError("Invalid area size: %s => %s" % (ocrArea, (x1, y1, x2, y2)))
    if orig_width <= 0 or orig_height <= 0:
        raise EyenfingerError("Invalid image size: %sx%s" % (orig_width, orig_height))
    if not ocr:
        if capture:
            drawWords(_g_origImage, capture, [], [])
        return []
    if preprocess == None:
        preprocess = _g_preprocess
    # convert to text
    _g_readImage = _g_origImage + "-pp.png"
    if ocrArea == (0, 0, 1.0, 1.0):
        croparea = []
        wordXOffset = 0
        wordYOffset = 0
    else:
        croparea = ["-crop", "%sx%s+%s+%s" % (x2-x1, y2-y1, x1, y1), "+repage"]
        wordXOffset = x1
        wordYOffset = y1
    # rescale possible resize preprocessing parameter
    resize_m = re.search('-resize ([0-9]+)x([0-9]*)', preprocess)
    if resize_m:
        origXResize = int(resize_m.group(1))
        newXResize = int(origXResize/float(orig_width) * (x2-x1))
        preprocess = (preprocess[:resize_m.start()] +
                      ("-resize %sx" % (newXResize,)) +
                      preprocess[resize_m.end():])
    # Words found by all requested page segmentation modes are merged.
    _g_words = {}
    for psm in ocrPageSegModes:
        convert_cmd = ([fmbt_config.imagemagick_convert, _g_origImage] +
                       croparea +
                       shlex.split(preprocess) +
                       [_g_readImage])
        tesseract_cmd = ["tesseract", _g_readImage, SCREENSHOT_FILENAME,
                         "-l", lang, _g_tesseractPSM, str(psm), "hocr"]
        if isinstance(configfile, basestring):
            tesseract_cmd += [configfile]
        elif isinstance(configfile, list) or isinstance(configfile, tuple):
            tesseract_cmd += configfile
        exit_status, output = _runcmd(convert_cmd)
        if exit_status != 0:
            raise NoOCRResults("Convert returned exit status (%s): %s"
                               % (exit_status, _g_last_runcmd_error))
        exit_status, output = _runcmd(tesseract_cmd)
        if (exit_status == 1 and "'-psm'" in _g_last_runcmd_error
            and _g_tesseractPSM == "-psm"):
            # Tesseract versions up to 4.x.x beta take "-psm", but
            # from some point on they want "--psm". Detect the error
            # due to wrong format and switch to another, if needed.
            _g_tesseractPSM = "--psm"
            tesseract_cmd[tesseract_cmd.index("-psm")] = _g_tesseractPSM
            exit_status, output = _runcmd(tesseract_cmd)
        if exit_status != 0:
            raise NoOCRResults("Tesseract returned exit status (%s): %s"
                               % (exit_status, _g_last_runcmd_error))
        hocr_filename = SCREENSHOT_FILENAME + ".html" # Tesseract 3.02
        if not os.access(hocr_filename, os.R_OK):
            hocr_filename = SCREENSHOT_FILENAME + ".hocr" # Tesseract 3.03
        if not os.access(hocr_filename, os.R_OK):
            raise NoOCRResults("HOCR output missing. Tesseract OCR 3.02 or greater required.\n")
        # store every word and its coordinates
        _g_words.update(_hocr2words(open(hocr_filename, encoding="utf8").read()))
        # convert word coordinates to the unscaled pixmap
        try:
            ocr_page_line = [line for line in open(hocr_filename, encoding="utf8").readlines() if "class='ocr_page'" in line][0]
        except IndexError:
            raise NoOCRResults("Could not read ocr_page class information from %s" % (hocr_filename,))
        scaled_width, scaled_height = re.findall('bbox 0 0 ([0-9]+)\s*([0-9]+)', ocr_page_line)[0]
        scaled_width, scaled_height = float(scaled_width) / (float(x2-x1)/orig_width), float(scaled_height) / (float(y2-y1)/orig_height)
        # Map each word's middle point and bbox from the preprocessed
        # (scaled/cropped) image back to original image coordinates.
        for word in sorted(_g_words.keys()):
            for appearance, (wordid, middle, bbox) in enumerate(_g_words[word]):
                _g_words[word][appearance] = \
                    (wordid,
                     (int(middle[0]/scaled_width * orig_width) + wordXOffset,
                      int(middle[1]/scaled_height * orig_height) + wordYOffset),
                     (int(bbox[0]/scaled_width * orig_width) + wordXOffset,
                      int(bbox[1]/scaled_height * orig_height) + wordYOffset,
                      int(bbox[2]/scaled_width * orig_width) + wordXOffset,
                      int(bbox[3]/scaled_height * orig_height) + wordYOffset))
                _log('found "' + word + '": (' + str(bbox[0]) + ', ' + str(bbox[1]) + ')')
    if capture:
        drawWords(_g_origImage, capture, _g_words, _g_words)
    return sorted(_g_words.keys())
def iVerifyWord(word, match=0.33, appearance=1, capture=None):
    """
    DEPRECATED - use fmbtx11.Screen.verifyOcrText instead.

    Verify that word can be found from previously iRead() image.

    Parameters:
        word         word that should be checked
        appearance   if word appears many times, appearance to
                     be clicked. Defaults to the first one.
        match        minimum matching score
        capture      save image with verified word highlighted
                     to this file. Default: None (nothing is saved).

    Returns pair: ((score, matchingWord), (left, top, right, bottom)), where

        score        score of found match (1.0 for perfect match)
        matchingWord corresponding word detected by OCR
        (left, top, right, bottom)
                     bounding box of the word in read image

    Throws BadMatch error if word is not found.
    Throws NoOCRResults error if there are OCR results available
    on the current screen.
    """
    if _g_words == None:
        raise NoOCRResults('iRead has not been called with ocr=True')
    score, matching_word = findWord(word)
    # The capture image is drawn even when the match below fails, so a
    # failed verification still leaves a debug image behind.
    if capture:
        drawWords(_g_origImage, capture, [word], _g_words)
    if score < match:
        raise BadMatch('No matching word for "%s". The best candidate "%s" with score %.2f, required %.2f' %
                       (word, matching_word, score, match))
    # appearance is 1-based; element [2] of the word entry is its bbox.
    return ((score, matching_word), _g_words[matching_word][appearance-1][2])
def iVerifyText(text, match=0.33, capture=None):
    """
    DEPRECATED - use fmbtx11.Screen.verifyOcrText instead.

    Verify that text can be found from previously iRead() image.

    Parameters:
        text         multiple words that should be checked
        match        minimum matching score
        capture      save image with verified text highlighted
                     to this file. Default: None (nothing is saved).

    Returns pair:
        ((score, matchingText), (left, top, right, bottom)), where

        score        score of found match (1.0 for perfect match)
        matchingText corresponding text detected by OCR
        (left, top, right, bottom)
                     bounding box of the text in read image

    Throws BadMatch error if text is not found.
    Throws NoOCRResults error if there are OCR results available
    on the current screen.
    """
    if _g_words == None:
        raise NoOCRResults('iRead has not been called with ocr=True')
    score_text_bbox_list = findText(text, match)
    if len(score_text_bbox_list) == 0:
        # Bug fix: this previously referenced "score" before assignment.
        raise BadMatch('No match >= %s for text "%s"' % (match, text))
    # Bug fix: the list variable name was misspelled (score_text_box_list)
    # and the matched text was unpacked into a name that was never defined,
    # so this function could never return successfully.
    score, matching_text, bbox = score_text_bbox_list[0]
    if capture:
        drawBbox(_g_origImage, capture, bbox, "%.2f %s" % (score, matching_text))
    return ((score, matching_text), bbox)
def iVerifyIcon(iconFilename, match=None, colorMatch=None, opacityLimit=None, capture=None, area=(0.0, 0.0, 1.0, 1.0), _origin="iVerifyIcon"):
    """
    DEPRECATED - use fmbtx11.Screen.verifyBitmap instead.

    Verify that icon can be found from previously iRead() image.

    Parameters:
        iconFilename   name of the icon file to be searched for
        match          minimum matching score between 0 and 1.0,
                       1.0 is perfect match (default)
        colorMatch     1.0 (default) requires exact color match. Value
                       below 1.0 defines maximum allowed color
                       difference. See iSetDefaultIconColorMatch.
        opacityLimit   0.0 (default) requires exact color values
                       independently of opacity. If lower than 1.0,
                       pixel less opaque than given value are skipped
                       in pixel perfect comparisons.
        capture        save image with verified icon highlighted
                       to this file. Default: None (nothing is saved).
        area           rectangle (left, top, right, bottom). Search
                       icon inside this rectangle only. Values can be
                       absolute coordinates, or floats in range [0.0,
                       1.0] that will be scaled to image dimensions.
                       The default is (0.0, 0.0, 1.0, 1.0), that is
                       full rectangle.

    Returns pair: (score, (left, top, right, bottom)), where

        score          score of found match (1.0 for perfect match)
        (left, top, right, bottom)
                       bounding box of found icon

    Throws BadMatch error if icon is not found.
    """
    if not eye4graphics:
        _log('ERROR: %s("%s") called, but eye4graphics not loaded.' % (_origin, iconFilename))
        raise EyenfingerError("eye4graphics not available")
    if not _g_origImage:
        _log('ERROR %s("%s") called, but source not defined (iRead not called).' % (_origin, iconFilename))
        raise BadSourceImage("Source image not defined, cannot search for an icon.")
    if not (os.path.isfile(iconFilename) and os.access(iconFilename, os.R_OK)):
        _log('ERROR %s("%s") called, but the icon file is not readable.' % (_origin, iconFilename))
        raise BadIconImage('Icon "%s" is not readable.' % (iconFilename,))
    # Fill in module defaults and validate ranges.
    if match == None:
        match = _g_defaultIconMatch
    if match > 1.0:
        _log('ERROR %s("%s"): invalid match value, must be below 1.0. ' % (_origin, iconFilename,))
        raise ValueError("invalid match value: %s, should be 0 <= match <= 1.0" % (match,))
    if colorMatch == None:
        colorMatch = _g_defaultIconColorMatch
    if not 0.0 <= colorMatch <= 1.0:
        _log('ERROR %s("%s"): invalid colorMatch value, must be between 0 and 1. ' % (_origin, iconFilename,))
        raise ValueError("invalid colorMatch value: %s, should be 0 <= colorMatch <= 1.0" % (colorMatch,))
    if opacityLimit == None:
        opacityLimit = _g_defaultIconOpacityLimit
    if not 0.0 <= opacityLimit <= 1.0:
        _log('ERROR %s("%s"): invalid opacityLimit value, must be between 0 and 1. ' % (_origin, iconFilename,))
        raise ValueError("invalid opacityLimit value: %s, should be 0 <= opacityLimit <= 1.0" % (opacityLimit,))
    # NOTE(review): left/right uses ">" but top/bottom uses ">=" -- looks
    # inconsistent; confirm whether zero-width areas are intentionally
    # allowed horizontally but not vertically.
    if area[0] > area[2] or area[1] >= area[3]:
        raise ValueError("invalid area: %s, should be rectangle (left, top, right, bottom)" % (area,))
    # Search area in absolute pixels, with a trailing zero for Bbox.error.
    leftTopRightBottomZero = (_coordsToInt((area[0], area[1]), windowSize()) +
                              _coordsToInt((area[2], area[3]), windowSize()) +
                              (0,))
    struct_area_bbox = Bbox(*leftTopRightBottomZero)
    struct_bbox = Bbox(0,0,0,0,0)
    # match 1.0 -> threshold 0 (pixel-perfect); lower match allows larger
    # error in the C-side comparison.
    threshold = int((1.0-match)*20)
    err = eye4graphics.findSingleIcon(ctypes.byref(struct_bbox),
                                      _g_origImage, iconFilename, threshold,
                                      ctypes.c_double(colorMatch),
                                      ctypes.c_double(opacityLimit),
                                      ctypes.byref(struct_area_bbox))
    bbox = (int(struct_bbox.left), int(struct_bbox.top),
            int(struct_bbox.right), int(struct_bbox.bottom))
    if err == -1 or err == -2:
        msg = '%s: "%s" not found, match=%.2f, threshold=%s, closest threshold %s.' % (
            _origin, iconFilename, match, threshold, int(struct_bbox.error))
        if capture:
            drawIcon(_g_origImage, capture, iconFilename, bbox, 'red')
        _log(msg)
        raise BadMatch(msg)
    elif err != 0:
        _log("%s: findSingleIcon returned %s" % (_origin, err,))
        raise BadMatch("%s not found, findSingleIcon returned %s." % (iconFilename, err))
    if threshold > 0:
        score = (threshold - int(struct_bbox.error)) / float(threshold)
    else:
        score = 1.0
    if capture:
        drawIcon(_g_origImage, capture, iconFilename, bbox, area=leftTopRightBottomZero[:4])
    return (score, bbox)
def iClickIcon(iconFilename, clickPos=(0.5,0.5), match=None,
               colorMatch=None, opacityLimit=None,
               mouseButton=1, mouseEvent=MOUSEEVENT_CLICK, dryRun=None, capture=None):
    """
    DEPRECATED - use fmbtx11.Screen.tapBitmap instead.

    Click coordinates relative to the given icon in previously iRead() image.

    Parameters:
        iconFilename read icon from this file

        clickPos     position to be clicked,
                     relative to word top-left corner of the bounding
                     box around the word. X and Y units are relative
                     to width and height of the box. (0,0) is the
                     top-left corner, (1,1) is bottom-right corner,
                     (0.5, 0.5) is the middle point (default).
                     Values below 0 or greater than 1 click outside
                     the bounding box.

        match        1.0 (default) requires exact match. Value below 1.0
                     defines minimum required score for fuzzy matching
                     (EXPERIMENTAL). See iSetDefaultIconMatch.

        colorMatch   1.0 (default) requires exact color match. Value
                     below 1.0 defines maximum allowed color
                     difference. See iSetDefaultIconColorMatch.

        opacityLimit 0.0 (default) requires exact color values
                     independently of opacity. If lower than 1.0,
                     pixel less opaque than given value are skipped
                     in pixel perfect comparisons.

        mouseButton  mouse button to be synthesized on the event, default is 1.

        mouseEvent   event to be synthesized, the default is MOUSEEVENT_CLICK,
                     others: MOUSEEVENT_MOVE, MOUSEEVENT_DOWN, MOUSEEVENT_UP.

        dryRun       if True, does not synthesize events. Still returns
                     coordinates of the clicked position and illustrates
                     the clicked position on the capture image if
                     given.

        capture      name of file where image of highlighted icon and
                     clicked point are saved.

    Returns pair (score, (clickedX, clickedY)), where

        score        score of found match (1.0 for perfect match)
        (clickedX, clickedY)
                     X and Y coordinates of clicked position on the
                     screen.

    Throws BadMatch error if could not find a matching word.
    """
    _DEPRECATED()
    # Locate the icon first, then delegate the click to iClickBox.
    score, bbox = iVerifyIcon(iconFilename, match=match,
                              colorMatch=colorMatch, opacityLimit=opacityLimit,
                              capture=capture, _origin="iClickIcon")
    clickedXY = iClickBox(bbox, clickPos, mouseButton, mouseEvent, dryRun,
                          capture, _captureText = iconFilename)
    return (score, clickedXY)
def iClickWord(word, appearance=1, clickPos=(0.5,0.5), match=0.33,
               mouseButton=1, mouseEvent=1, dryRun=None, capture=None):
    """
    DEPRECATED - use fmbtx11.Screen.tapOcrText instead.
    Click coordinates relative to the given word in previously iRead() image.
    Parameters:
      word         word that should be clicked
      appearance   if word appears many times, appearance to
                   be clicked. Defaults to the first one.
      clickPos     position to be clicked,
                   relative to word top-left corner of the bounding
                   box around the word. X and Y units are relative
                   to width and height of the box. (0,0) is the
                   top-left corner, (1,1) is bottom-right corner,
                   (0.5, 0.5) is the middle point (default).
                   Values below 0 or greater than 1 click outside
                   the bounding box.
      match        minimum required score for fuzzy matching,
                   passed to iVerifyWord. The default is 0.33.
      mouseButton  mouse button to be synthesized on the event, default is 1.
      mouseEvent   event to be synthesized, the default is MOUSEEVENT_CLICK,
                   others: MOUSEEVENT_MOVE, MOUSEEVENT_DOWN, MOUSEEVENT_UP.
      dryRun       if True, does not synthesize events. Still returns
                   coordinates of the clicked position.
      capture      name of file where image of highlighted word and
                   clicked point are saved.
    Returns pair: ((score, matchingWord), (clickedX, clickedY)), where
      score        score of found match (1.0 for perfect match)
      matchingWord corresponding word detected by OCR
      (clickedX, clickedY)
                   X and Y coordinates of clicked position on the
                   screen.
    Throws BadMatch error if could not find a matching word.
    Throws NoOCRResults error if there are no OCR results available
    on the current screen.
    """
    _DEPRECATED()
    # Verification and clicking do their own capturing below, so pass
    # capture=False to the helpers and draw the final illustration here.
    (score, matching_word), bbox = iVerifyWord(word, appearance=appearance, match=match, capture=False)
    clickedX, clickedY = iClickBox(bbox, clickPos, mouseButton, mouseEvent, dryRun, capture=False)
    windowId = _g_lastWindow
    _log('iClickWord("%s"): word "%s", match %.2f, bbox %s, window offset %s, click %s' %
         (word, matching_word, score,
          bbox, _g_windowOffsets[windowId],
          (clickedX, clickedY)))
    if capture:
        drawWords(_g_origImage, capture, [word], _g_words)
        drawClickedPoint(capture, capture, (clickedX, clickedY))
    return ((score, matching_word), (clickedX, clickedY))
def iClickBox(box, clickPos=(0.5, 0.5),
              mouseButton=1, mouseEvent=1, dryRun=None,
              capture=None, _captureText=None):
    """
    DEPRECATED - use fmbtx11.Screen.tapItem instead.
    Click a position relative to the given bounding box; by default the
    middle of the box.
    Parameters:
      box          (left, top, right, bottom) coordinates of the box
                   inside the window; (0, 0) is the window's top-left
                   corner.
      clickPos     (offsetX, offsetY) relative to the box: (0, 0) is its
                   top-left and (1.0, 1.0) its bottom-right corner.
                   Values outside 0..1 are allowed. Default (0.5, 0.5).
      mouseButton  mouse button to be synthesized on the event, default is 1.
      mouseEvent   event to be synthesized, the default is MOUSEEVENT_CLICK,
                   others: MOUSEEVENT_MOVE, MOUSEEVENT_DOWN, MOUSEEVENT_UP.
      dryRun       if True, does not synthesize events; the clicked
                   position is still returned and illustrated.
      capture      optional filename: save the last screenshot with the
                   box and the clicked point highlighted.
      _captureText internal: caption for the highlighted box.
    Returns pair (clickedX, clickedY): screen coordinates of the click.
    """
    boxLeft, boxTop, boxRight, boxBottom = box
    relX, relY = clickPos
    winX = int(boxLeft + relX * (boxRight - boxLeft))
    winY = int(boxTop + relY * (boxBottom - boxTop))
    clickedXY = iClickWindow((winX, winY),
                             mouseButton, mouseEvent,
                             dryRun, capture=False)
    if capture:
        caption = _captureText
        if caption is None:
            caption = "Box: %s, %s, %s, %s" % (boxLeft, boxTop, boxRight, boxBottom)
        drawIcon(_g_origImage, capture, caption, (boxLeft, boxTop, boxRight, boxBottom))
        drawClickedPoint(capture, capture, clickedXY)
    return clickedXY
def iClickWindow(clickXY, mouseButton=1, mouseEvent=1, dryRun=None, capture=None):
    """
    DEPRECATED - use fmbtx11.Screen.tap instead.
    Click given coordinates in the window.
    Parameters:
      (clickX, clickY)
                   coordinates to be clicked inside the window.
                   (0, 0) is the top-left corner of the window.
                   Integer values are window coordinates. Floating
                   point values from 0.0 to 1.0 are scaled to window
                   coordinates: (0.5, 0.5) is the middle of the
                   window, and (1.0, 1.0) the bottom-right corner of
                   the window.
      mouseButton  mouse button to be synthesized on the event, default is 1.
      mouseEvent   event to be synthesized, the default is MOUSEEVENT_CLICK,
                   others: MOUSEEVENT_MOVE, MOUSEEVENT_DOWN, MOUSEEVENT_UP.
      dryRun       if True, does not synthesize events. Still
                   illustrates the clicked position on the capture
                   image if given.
      capture      name of file where the last screenshot with
                   clicked point highlighted is saved. The default
                   is None (nothing is saved).
    Returns pair (clickedX, clickedY)
                   X and Y coordinates of clicked position on the
                   screen.
    """
    # Window size is needed to scale 0.0..1.0 coordinates to pixels.
    wndSize = windowSize()
    (clickX, clickY) = _coordsToInt(clickXY, wndSize)
    # Position of the window on the screen.
    wndPos = windowXY()
    # Translate window coordinates to absolute screen coordinates.
    clickScrX = clickX + wndPos[0]
    clickScrY = clickY + wndPos[1]
    iClickScreen((clickScrX, clickScrY), mouseButton, mouseEvent, dryRun, capture)
    return (clickScrX, clickScrY)
def iClickScreen(clickXY, mouseButton=1, mouseEvent=1, dryRun=None, capture=None):
    """
    DEPRECATED - use fmbtx11.Screen.tap instead.
    Synthesize a mouse event at absolute screen coordinates.
    Parameters:
      (clickX, clickY)
                   position on the screen; (0, 0) is the top-left
                   corner. Integer values are screen coordinates;
                   floats in 0.0..1.0 are scaled to the screen size,
                   (1.0, 1.0) being the bottom-right corner.
      mouseButton  mouse button to be synthesized on the event, default is 1.
      mouseEvent   event to be synthesized, the default is MOUSEEVENT_CLICK,
                   others: MOUSEEVENT_MOVE, MOUSEEVENT_DOWN, MOUSEEVENT_UP.
      dryRun       if True, does not synthesize events. Still
                   illustrates the clicked position on the capture
                   image if given.
      capture      name of file where the last screenshot with the
                   clicked point highlighted is saved; None (default)
                   saves nothing.
    """
    _DEPRECATED()
    # MOUSEEVENT_MOVE (or any unknown event) results in a plain
    # mousemove with no button event appended.
    params = ""
    for eventType, template in ((MOUSEEVENT_CLICK, "'mouseclick %s'"),
                                (MOUSEEVENT_DOWN, "'mousedown %s'"),
                                (MOUSEEVENT_UP, "'mouseup %s'")):
        if mouseEvent == eventType:
            params = template % (mouseButton,)
            break
    pointerX, pointerY = _coordsToInt(clickXY)
    if capture:
        drawClickedPoint(_g_origImage, capture, (pointerX, pointerY))
    if dryRun is None:
        dryRun = _g_defaultClickDryRun
    if not dryRun:
        # use xte from the xautomation package
        _runcmd("xte 'mousemove %s %s' %s" % (pointerX, pointerY, params))
def iGestureScreen(listOfCoordinates, duration=0.5, holdBeforeGesture=0.0, holdAfterGesture=0.0, intermediatePoints=0, capture=None, dryRun=None):
    """
    DEPRECATED - use fmbtx11.Screen.drag instead.
    Synthesizes a gesture on the screen.
    Parameters:
      listOfCoordinates
                   The coordinates through which the cursor moves.
                   Integer values are screen coordinates. Floating
                   point values from 0.0 to 1.0 are scaled to screen
                   coordinates: (0.5, 0.5) is the middle of the
                   screen, and (1.0, 1.0) the bottom-right corner of
                   the screen.
      duration     gesture time in seconds, excluding
                   holdBeforeGesture and holdAfterGesture times.
      holdBeforeGesture
                   time in seconds to keep mouse down before the
                   gesture.
      holdAfterGesture
                   time in seconds to keep mouse down after the
                   gesture.
      intermediatePoints
                   the number of intermediate points to be added
                   between each of the coordinates. Intermediate
                   points are added to straight lines between start
                   and end points.
      capture      name of file where the last screenshot with
                   the points through which the cursor passes is
                   saved. The default is None (nothing is saved).
      dryRun       if True, does not synthesize events. Still
                   illustrates the coordinates through which the cursor
                   goes. NOTE(review): unlike iClickScreen, a None
                   dryRun does NOT fall back to the configured default
                   dry run mode - confirm whether that is intended.
    Returns the list of (x, y) screen coordinates through which the
    cursor is moved (user-given points plus generated intermediate
    points).
    """
    _DEPRECATED()
    # The list of coordinates through which the cursor has to go,
    # including interpolated intermediate points.
    goThroughCoordinates = []
    for pos in range(len(listOfCoordinates)):
        x, y = _coordsToInt(listOfCoordinates[pos])
        goThroughCoordinates.append((x, y))
        if pos == len(listOfCoordinates) - 1:
            break # last coordinate added
        nextX, nextY = _coordsToInt(listOfCoordinates[pos+1])
        # Interpolate intermediate points on the straight line from
        # (x, y) to (nextX, nextY).
        for ip in range(intermediatePoints):
            goThroughCoordinates.append(
                (int(round(x + (nextX-x)*(ip+1)/float(intermediatePoints+1))),
                 int(round(y + (nextY-y)*(ip+1)/float(intermediatePoints+1)))))
    # Calculate the time (in microseconds) to sleep between moves.
    if len(goThroughCoordinates) > 1:
        moveDelay = 1000000 * float(duration) / (len(goThroughCoordinates)-1)
    else:
        moveDelay = 0
    if not dryRun:
        # Build the xte event list: move to start, press button 1, walk
        # through the points with delays, then release button 1.
        params = ["'mousemove %d %d'" % goThroughCoordinates[0]]
        params.append("'mousedown 1 '")
        if holdBeforeGesture > 0:
            params.append("'usleep %d'" % (holdBeforeGesture * 1000000,))
        for i in range(1, len(goThroughCoordinates)):
            params.append("'usleep %d'" % (moveDelay,))
            params.append("'mousemove %d %d'" % goThroughCoordinates[i])
        if holdAfterGesture > 0:
            params.append("'usleep %d'" % (holdAfterGesture * 1000000,))
        params.append("'mouseup 1'")
        # Perform the gesture.
        _runcmd("xte %s" % (" ".join(params),))
    if capture:
        intCoordinates = [_coordsToInt(point) for point in listOfCoordinates]
        drawLines(_g_origImage, capture, intCoordinates, goThroughCoordinates)
    return goThroughCoordinates
def iGestureWindow(listOfCoordinates, duration=0.5, holdBeforeGesture=0.0, holdAfterGesture=0.0, intermediatePoints=0, capture=None, dryRun=None):
    """
    DEPRECATED - use fmbtx11.Screen.drag instead.
    Synthesizes a gesture on the window: converts the given window
    coordinates to screen coordinates and delegates to iGestureScreen.
    Parameters:
      listOfCoordinates
                   The coordinates through which the cursor moves.
                   Integer values are window coordinates. Floating
                   point values from 0.0 to 1.0 are scaled to window
                   coordinates: (0.5, 0.5) is the middle and
                   (1.0, 1.0) the bottom-right corner of the window.
      duration     gesture time in seconds, excluding
                   holdBeforeGesture and holdAfterGesture times.
      holdBeforeGesture
                   time in seconds to keep mouse down before the gesture.
      holdAfterGesture
                   time in seconds to keep mouse down after the gesture.
      intermediatePoints
                   the number of intermediate points to be added
                   between each of the coordinates.
      capture      name of file where the last screenshot with the
                   points through which the cursor passes is saved.
                   The default is None (nothing is saved).
      dryRun       if True, does not synthesize events. Still
                   illustrates the coordinates through which the
                   cursor goes.
    """
    screenCoordinates = []
    for point in listOfCoordinates:
        winX, winY = _coordsToInt(point, windowSize())
        screenCoordinates.append(_windowToScreen(winX, winY))
    return iGestureScreen(screenCoordinates, duration, holdBeforeGesture, holdAfterGesture, intermediatePoints, capture, dryRun)
def iType(word, delay=0.0):
    """
    DEPRECATED - use fmbtx11.Screen.type instead.
    Send keypress events.
    Parameters:
      word   is either
             - a string containing letters and numbers.
               Each letter/number is sent with press and release events.
             - a list that contains
               - keys: each key is sent using press and release events.
               - (key, event)-pairs: only the given event ("press" or
                 "release") is sent.
               - (key1, key2, ..., keyn)-tuples: 2n events are sent:
                 key1 press, key2 press, ..., keyn press,
                 keyn release, ..., key2 release, key1 release.
             Keys are defined in eyenfinger.Xkeys, for complete list
             see keysymdef.h.
      delay  seconds to sleep between sent events
    Examples:
      iType('hello')
      iType([('Shift_L', 'press'), 'h', 'e', ('Shift_L', 'release'), 'l', 'l', 'o'])
      iType([('Control_L', 'Alt_L', 'Delete')])
    """
    _DEPRECATED()
    xteCommands = []
    for item in word:
        if type(item) == tuple:
            if item[1].lower() == 'press':
                xteCommands.append("'keydown %s'" % (item[0],))
            elif item[1].lower() == 'release':
                xteCommands.append("'keyup %s'" % (item[0],))
            else:
                # A key combination: press every key in order, then
                # release them in reverse order.
                releases = []
                for key in item:
                    xteCommands.append("'keydown %s'" % (key,))
                    releases.insert(0, "'keyup %s'" % (key,))
                xteCommands.extend(releases)
        else:
            # item is a key name or a single letter/number.
            xteCommands.append("'key %s'" % (item,))
    separator = " 'usleep %s' " % (int(delay*1000000),)
    _runcmd("xte %s" % (separator.join(xteCommands),))
def iInputKey(*args, **kwargs):
    """
    DEPRECATED - use fmbtx11.Screen.pressKey instead.
    Send keypresses using Linux evdev interface
    (/dev/input/eventXX).
    iInputKey(keySpec[, keySpec...], hold=<float>, delay=<float>, device=<str>)
    Parameters:
      keySpec is one of the following:
              - a string of one-character-long key names:
                "aesc" will send four keypresses: A, E, S and C.
              - a list of key names:
                ["a", "esc"] will send two keypresses: A and ESC.
                Key names are listed in eyenfinger.InputKeys.
              - an integer:
                116 will press the POWER key.
              - "_" or "^":
                only press or release event will be generated
                for the next key, respectively.
              If a key name inside keySpec is prefixed by "_"
              or "^", only press or release event is generated
              for that key.
      hold    time (in seconds) to hold the key before
              releasing. The default is 0.1.
      delay   delay (in seconds) after key release. The default
              is 0.1.
      device  name of the input device or input event file to
              which all key presses are sent. The default can
              be set with iSetDefaultInputKeyDevice(). For
              instance, "/dev/input/event0" or a name of a
              device in /proc/bus/input/devices.
    """
    _DEPRECATED()
    hold = kwargs.get("hold", 0.1)
    delay = kwargs.get("delay", 0.1)
    device = kwargs.get("device", _g_defaultInputKeyDevice)
    # inputKeySeq collects (press, release, keyCode) triplets that are
    # finally written to the device in one go.
    inputKeySeq = []
    # Pending press/release flags applied to the next key: "_" makes the
    # next key press-only, "^" makes it release-only. After a key is
    # emitted the flags reset to press-and-release.
    press, release = 1, 1
    for a in args:
        if a == "_": press, release = 1, 0
        elif a == "^": press, release = 0, 1
        elif type(a) == str:
            # A string is a sequence of one-character key names.
            for char in a:
                if char == "_": press, release = 1, 0
                elif char == "^": press, release = 0, 1
                else:
                    inputKeySeq.append((press, release, _inputKeyNameToCode(char.upper())))
                    press, release = 1, 1
        elif type(a) in (tuple, list):
            for keySpec in a:
                if type(keySpec) == int:
                    # A raw evdev key code.
                    inputKeySeq.append((press, release, keySpec))
                    press, release = 1, 1
                else:
                    # A key name, possibly prefixed with "_" or "^".
                    if keySpec.startswith("_"):
                        press, release = 1, 0
                        keySpec = keySpec[1:]
                    elif keySpec.startswith("^"):
                        press, release = 0, 1
                        keySpec = keySpec[1:]
                    if keySpec:
                        inputKeySeq.append((press, release, _inputKeyNameToCode(keySpec.upper())))
                        press, release = 1, 1
        elif type(a) == int:
            # A raw evdev key code.
            inputKeySeq.append((press, release, a))
            press, release = 1, 1
        else:
            raise ValueError('Invalid keySpec "%s"' % (a,))
    if inputKeySeq:
        _writeInputKeySeq(_deviceFilename(device), inputKeySeq, hold=hold, delay=delay)
def _deviceFilename(deviceName):
if not _deviceFilename.deviceCache:
_deviceFilename.deviceCache = dict(_listInputDevices())
if not deviceName in _deviceFilename.deviceCache:
return deviceName
else:
return _deviceFilename.deviceCache[deviceName]
_deviceFilename.deviceCache = {}
def _listInputDevices():
    """Parse /proc/bus/input/devices into [name, eventFile] pairs.

    Returns a list where each item is [deviceName, "/dev/input/eventNN"].
    A device whose handler row contains no recognisable "eventNN" token
    is returned with the name only (logged as a warning).
    """
    nameAndFile = []
    # Use a context manager so the /proc file handle is always closed.
    with open("/proc/bus/input/devices") as devices:
        for l in devices:
            if l.startswith("N: Name="):
                nameAndFile.append([l.split('"')[1]])
            elif l.startswith("H: Handlers=") and "event" in l:
                # Catch only the expected failure (no "eventNN" match, or a
                # handler row before any name row); the original bare
                # "except:" swallowed everything, including KeyboardInterrupt.
                try:
                    eventFilename = re.findall("(event[0-9]+)", l)[0]
                    nameAndFile[-1].append("/dev/input/%s" % (eventFilename,))
                except IndexError:
                    _log('WARNING: Could not recognise event[0-9] filename from row "%s".' % (l.strip(),))
    return nameAndFile
def _writeInputKeySeq(filename, keyCodeSeq, hold=0.1, delay=0.1):
    """Write evdev key events to an input device file.

    Parameters:
      filename   path to the input event device, e.g. "/dev/input/event0".
      keyCodeSeq sequence of (press, release, keyCode) triplets; the
                 press and release flags select which events to emit.
      hold       seconds to keep a key down before releasing it.
      delay      seconds to sleep after each key release.

    Raises ValueError if filename is not a non-empty string.
    """
    if type(filename) != str or len(filename) == 0:
        raise ValueError('Invalid input device "%s"' % (filename,))
    fd = os.open(filename, os.O_WRONLY | os.O_NONBLOCK)
    try:
        for press, release, keyCode in keyCodeSeq:
            # bytesWritten tracks whether the key event write succeeded; a
            # synchronization event is only sent after a successful write.
            # (The original code referenced its counter before assignment
            # when the first event was release-only, and shadowed the
            # builtin "bytes".)
            bytesWritten = 0
            if press:
                bytesWritten = os.write(fd, struct.pack(
                    _InputEventStructSpec,
                    int(time.time()), 0, _EV_KEY, keyCode, 1))
                if bytesWritten > 0:
                    bytesWritten += os.write(fd, struct.pack(
                        _InputEventStructSpec, 0, 0, 0, 0, 0))
                time.sleep(hold)
            if release:
                bytesWritten += os.write(fd, struct.pack(
                    _InputEventStructSpec,
                    int(time.time()), 0, _EV_KEY, keyCode, 0))
                if bytesWritten > 0:
                    bytesWritten += os.write(fd, struct.pack(
                        _InputEventStructSpec, 0, 0, 0, 0, 0))
                time.sleep(delay)
    finally:
        # Close the device even if a write fails.
        os.close(fd)
def findWord(word, detected_words = None, appearance=1):
    """
    Return the pair (score, corresponding-detected-word) of the best match.

    Parameters:
      word            the word to look for.
      detected_words  dictionary of detected words (as produced by
                      _hocr2words). If None, the words detected on the
                      most recently read image (_g_words) are used.
      appearance      currently unused; kept for API compatibility.

    Raises NoOCRResults if no OCR results are available, and BadMatch if
    the word dictionary is empty.
    """
    if detected_words == None:
        detected_words = _g_words
    # Check the words that are actually searched: the original checked
    # the global _g_words even when detected_words was passed explicitly,
    # raising NoOCRResults although results were available.
    if detected_words == None:
        raise NoOCRResults()
    scored_words = []
    for w in detected_words:
        scored_words.append((_score(w, word), w))
    scored_words.sort()
    if len(scored_words) == 0:
        raise BadMatch("No words found.")
    return scored_words[-1]
def findText(text, detected_words = None, match=-1):
    """
    Return (score, matchingText, bbox) triplets for sequences of
    consecutive detected words that match the given text.

    Parameters:
      text            text to search for; whitespace is normalized to
                      single spaces before matching.
      detected_words  dictionary of detected words (as produced by
                      _hocr2words). If None, the words detected on the
                      most recently read image (_g_words) are used.
      match           minimum score required for a result to be included;
                      the default (-1) includes everything.

    Raises NoOCRResults if no OCR results are available.
    """
    def biggerBox(bbox_list):
        # Smallest bounding box that covers all the given boxes.
        left, top, right, bottom = bbox_list[0]
        for l, t, r, b in bbox_list[1:]:
            left = min(left, l)
            top = min(top, t)
            right = max(right, r)
            bottom = max(bottom, b)
        return (left, top, right, bottom)
    words = text.split()
    word_count = len(words)
    detected_texts = [] # strings of <word_count> words
    if detected_words == None:
        detected_words = _g_words
    # Check the words that are actually searched: the original checked
    # the global _g_words even when detected_words was passed explicitly.
    if detected_words == None:
        raise NoOCRResults()
    # sort by numeric word id to get the words in document order
    words_by_id = []
    for word in detected_words:
        for wid, middle, bbox in detected_words[word]:
            # change word id from "word_2_42" to (2, 42)
            int_wid = [int(n) for n in wid[5:].split("_")]
            words_by_id.append(
                (int_wid, word, bbox))
    words_by_id.sort()
    scored_texts = []
    if word_count > 0:
        # Score every window of word_count consecutive detected words.
        for i in range(len(words_by_id)-word_count+1):
            detected_texts.append(
                (" ".join([w[1] for w in words_by_id[i:i+word_count]]),
                 biggerBox([w[2] for w in words_by_id[i:i+word_count]])))
        norm_text = " ".join(words) # normalize whitespace
        for t in detected_texts:
            scored_texts.append((_score(t[0], norm_text), t[0], t[1]))
        scored_texts.sort()
    elif match == 0.0:
        # text == "", match == 0 => every word is a match
        for w in words_by_id:
            detected_texts.append((w[1], w[2]))
        scored_texts = [(0.0, t[0], t[1]) for t in detected_texts]
    else:
        # text == "", match != 0 => no hits
        detected_texts = []
        scored_texts = []
    return [st for st in scored_texts if st[0] >= match]
def _score(w1, w2):
closeMatch = {
'1l': 0.1,
'1I': 0.2,
'Il': 0.2
}
def levenshteinDistance(w1, w2):
m = [range(len(w1)+1)]
for j in range(len(w2)+1):
m.append([])
m[-1].append(j+1)
i, j = 0, 0
for j in range(1, len(w2)+1):
for i in range(1, len(w1)+1):
if w1[i-1] == w2[j-1]:
m[j].append(m[j-1][i-1])
else:
# This is not part of Levenshtein:
# if characters often look similar,
# don't add full edit distance (1.0),
# use the value in closeMatch instead.
chars = ''.join(sorted(w1[i-1] + w2[j-1]))
if chars in closeMatch:
m[j].append(m[j-1][i-1]+closeMatch[chars])
else:
# Standard Levenshtein continues...
m[j].append(min(
m[j-1][i] + 1, # delete
m[j][i-1] + 1, # insert
m[j-1][i-1] + 1 # substitute
))
return m[j][i]
return 1 - (levenshteinDistance(w1, w2) / float(max(len(w1),len(w2))))
def _hocr2words(hocr):
rv = {}
hocr = hocr.replace("<strong>","").replace("</strong>","").replace("<em>","").replace("</em>","")
hocr.replace("'", "'")
for name, code in html.entities.name2codepoint.items():
if code < 128:
hocr = hocr.replace('&' + name + ';', chr(code))
ocr_word = re.compile('''<span class=['"]ocrx?_word["'] id=['"]([^']*)["'] title=['"]bbox ([0-9]+) ([0-9]+) ([0-9]+) ([0-9]+)["';][^>]*>([^<]*)</span>''')
for word_id, bbox_left, bbox_top, bbox_right, bbox_bottom, word in ocr_word.findall(hocr):
bbox_left, bbox_top, bbox_right, bbox_bottom = \
int(bbox_left), int(bbox_top), int(bbox_right), int(bbox_bottom)
if not word in rv:
rv[word] = []
middle_x = (bbox_right + bbox_left) / 2.0
middle_y = (bbox_top + bbox_bottom) / 2.0
rv[word].append((word_id, (middle_x, middle_y),
(bbox_left, bbox_top, bbox_right, bbox_bottom)))
return rv
def _getScreenSize():
    """Refresh the cached screen size (_g_screenSize, a (width, height)
    tuple) by querying the root window geometry with xwininfo."""
    global _g_screenSize
    _, output = _runcmd("xwininfo -root | awk '/Width:/{w=$NF}/Height:/{h=$NF}END{print w\" \"h}'")
    s_width, s_height = output.split(" ")
    _g_screenSize = (int(s_width), int(s_height))
def iUseWindow(windowIdOrName = None):
    """Select the window that subsequent i* functions operate on.

    windowIdOrName may be a window id string ("0x..."), a window name,
    or None to keep the previously selected window (falling back to the
    currently active window if none has been selected yet). Updates the
    cached window offset and size and the screen size. Returns the
    window id.

    Raises BadWindowName if a window id cannot be resolved for the name.
    """
    global _g_lastWindow
    if windowIdOrName == None:
        if _g_lastWindow == None:
            _g_lastWindow = iActiveWindow()
    elif windowIdOrName.startswith("0x"):
        _g_lastWindow = windowIdOrName
    else:
        # Resolve a window name to a window id with xwininfo.
        _g_lastWindow = _runcmd("xwininfo -name '%s' | awk '/Window id: 0x/{print $4}'" %
                                (windowIdOrName,))[1].strip()
        if not _g_lastWindow.startswith("0x"):
            raise BadWindowName('Cannot find window id for "%s" (got: "%s")' %
                                (windowIdOrName, _g_lastWindow))
    # Cache the window geometry (offset on screen and size) for
    # coordinate conversions.
    _, output = _runcmd("xwininfo -id %s | awk '/Width:/{w=$NF}/Height:/{h=$NF}/Absolute upper-left X/{x=$NF}/Absolute upper-left Y/{y=$NF}END{print x\" \"y\" \"w\" \"h}'" %
                        (_g_lastWindow,))
    offset_x, offset_y, width, height = output.split(" ")
    _g_windowOffsets[_g_lastWindow] = (int(offset_x), int(offset_y))
    _g_windowSizes[_g_lastWindow] = (int(width), int(height))
    _getScreenSize()
    return _g_lastWindow
def iUseImageAsWindow(imageFilename):
    """Use an image file instead of an X window as the "window".

    Sets the image as the last window: the window offset becomes (0, 0)
    and both the window size and the screen size become the image
    dimensions. Returns the window id (the image filename).

    Raises EyenfingerError if the eye4graphics library is not loaded,
    and BadSourceImage if the file cannot be read or its dimensions
    cannot be determined.
    """
    global _g_lastWindow
    global _g_screenSize
    if not eye4graphics:
        _log('ERROR: iUseImageAsWindow("%s") called, but eye4graphics not loaded.' % (imageFilename,))
        raise EyenfingerError("eye4graphics not available")
    if not os.access(imageFilename, os.R_OK):
        raise BadSourceImage("The input file could not be read or not present.")
    _g_lastWindow = imageFilename
    imageWidth, imageHeight = imageSize(imageFilename)
    if imageWidth == None:
        _log('iUseImageAsWindow: Failed reading dimensions of image "%s".' % (imageFilename,))
        raise BadSourceImage('Failed to read dimensions of "%s".' % (imageFilename,))
    _g_windowOffsets[_g_lastWindow] = (0, 0)
    _g_windowSizes[_g_lastWindow] = (imageWidth, imageHeight)
    _g_screenSize = _g_windowSizes[_g_lastWindow]
    return _g_lastWindow
def iActiveWindow(windowId = None):
    """Return the id of the active window in '0x1d0f14' format.

    If windowId is given, it is returned unchanged; otherwise the active
    window is queried from the X server with xprop.
    """
    if windowId == None:
        # Raw string: the backslashes must reach awk's regular expression
        # unchanged, and "\(" is an invalid escape sequence in a normal
        # Python string literal (DeprecationWarning, later SyntaxWarning).
        _, output = _runcmd(r"xprop -root | awk '/_NET_ACTIVE_WINDOW\(WINDOW\)/{print $NF}'")
        windowId = output.strip()
    return windowId
def drawBboxes(inputfilename, outputfilename, bboxes):
    """
    Highlight the given bounding boxes on inputfilename and save the
    result to outputfilename. Does nothing if inputfilename is None.
    """
    if inputfilename == None:
        return
    commands = []
    for (left, top, right, bottom) in bboxes:
        commands.extend([
            "-stroke", "green", "-fill", "blue", "-draw",
            "fill-opacity 0.2 rectangle %s,%s %s,%s" % (left, top, right, bottom)])
    _runDrawCmd(inputfilename, commands, outputfilename)
def drawBbox(inputfilename, outputfilename, bbox, caption):
    """
    Highlight a single bounding box with a caption on inputfilename and
    save the result to outputfilename. Does nothing if inputfilename is
    None.
    """
    if inputfilename == None:
        return
    left, top, right, bottom = bbox
    boxColor = "green"
    commands = [
        "-stroke", boxColor, "-fill", "blue", "-draw",
        "fill-opacity 0.2 rectangle %s,%s %s,%s" % (left, top, right, bottom),
        "-stroke", "none", "-fill", boxColor, "-draw",
        "text %s,%s '%s'" % (left, top, _safeForShell(caption)),
        ]
    _runDrawCmd(inputfilename, commands, outputfilename)
def drawWords(inputfilename, outputfilename, words, detected_words):
    """
    Draw boxes around detected words that match the given words, with the
    matched text and score as captions. Result is saved to outputfilename.
    Does nothing if inputfilename is None.
    """
    if inputfilename == None:
        return
    commands = []
    for wanted in words:
        score, detected = findWord(wanted, detected_words)
        left, top, right, bottom = detected_words[detected][0][2]
        # Color encodes match quality: red is a poor match, green a good one.
        if score < 0.33:
            color = "red"
        elif score < 0.5:
            color = "brown"
        else:
            color = "green"
        commands.extend(["-stroke", color, "-fill", "blue", "-draw",
                         "fill-opacity 0.2 rectangle %s,%s %s,%s" % (left, top, right, bottom)])
        commands.extend(["-stroke", "none", "-fill", color, "-draw",
                         "text %s,%s '%s'" % (left, top, _safeForShell(wanted))])
        commands.extend(["-stroke", "none", "-fill", color, "-draw",
                         "text %s,%s '%.2f'" % (left, bottom+10, score)])
    _runDrawCmd(inputfilename, commands, outputfilename)
def drawIcon(inputfilename, outputfilename, iconFilename, bboxes, color='green', area=None):
    """
    Highlight icon match bounding boxes on inputfilename and save the
    result to outputfilename. A single match may be given as a bare
    tuple; a list of matches gets numbered captions. If area is given,
    it is outlined in yellow. Does nothing if inputfilename is None.
    """
    if inputfilename == None:
        return
    if type(bboxes) == tuple:
        # A single bounding box: no numbering in the caption.
        bboxes = [bboxes]
        numbered = False
    else:
        numbered = True
    commands = []
    for index, bbox in enumerate(bboxes):
        left, top, right, bottom = bbox[0], bbox[1], bbox[2], bbox[3]
        commands += ["-stroke", color, "-fill", "blue", "-draw",
                     "fill-opacity 0.2 rectangle %s,%s %s,%s" % (left, top, right, bottom)]
        if numbered:
            caption = "%s %s" % (index+1, iconFilename)
        else:
            caption = iconFilename
        commands += ["-stroke", "none", "-fill", color, "-draw",
                     "text %s,%s '%s'" % (left, top, _safeForShell(caption))]
    if area != None:
        commands += ["-stroke", "yellow", "-draw",
                     "fill-opacity 0.0 rectangle %s,%s %s,%s" % (area[0]-1, area[1]-1, area[2], area[3])]
    _runDrawCmd(inputfilename, commands, outputfilename)
def drawClickedPoint(inputfilename, outputfilename, clickedXY):
    """
    Highlight a clicked point (given in absolute screen coordinates) on
    inputfilename and save the result to outputfilename. Does nothing if
    inputfilename is None.
    """
    if inputfilename == None:
        return
    # Convert from screen to window coordinates.
    offsetX, offsetY = _g_windowOffsets[_g_lastWindow]
    x = clickedXY[0] - offsetX
    y = clickedXY[1] - offsetY
    commands = ["-stroke", "red", "-fill", "blue", "-draw",
                "fill-opacity 0.2 circle %s,%s %s,%s" % (x, y, x + 20, y)]
    commands += ["-stroke", "none", "-fill", "red", "-draw", "point %s,%s" % (x, y)]
    _runDrawCmd(inputfilename, commands, outputfilename)
def _screenToWindow(x,y):
    """
    Convert absolute screen coordinates to coordinates of the currently
    selected window.
    """
    offsetX, offsetY = _g_windowOffsets[_g_lastWindow]
    return (x - offsetX, y - offsetY)
def _windowToScreen(x,y):
    """
    Convert coordinates of the currently selected window to absolute
    screen coordinates.
    """
    offsetX, offsetY = _g_windowOffsets[_g_lastWindow]
    return (x + offsetX, y + offsetY)
def drawLines(inputfilename, outputfilename, orig_coordinates, final_coordinates):
    """
    Illustrate a gesture: draw the points in final_coordinates (screen
    coordinates) and connect consecutive points with lines. Points that
    appear in orig_coordinates (user-given) are blue, generated
    intermediate points are white. Does nothing if inputfilename is None.
    """
    if inputfilename == None:
        return
    commands = []
    for (x, y), (nextX, nextY) in zip(final_coordinates, final_coordinates[1:]):
        # Convert both endpoints to window coordinates.
        startX, startY = _screenToWindow(x, y)
        endX, endY = _screenToWindow(nextX, nextY)
        pointColor = "blue" if (x, y) in orig_coordinates else "white"
        commands += ["-fill", pointColor, "-stroke", "red", "-draw",
                     "fill-opacity 0.2 circle %d, %d %d, %d" % (startX, startY, startX-5, startY-5)]
        # Line between the points, with a black "shadow" line offset by
        # one pixel.
        commands += ["-stroke", "red", "-draw", "line %d, %d, %d, %d" % (startX, startY, endX, endY)]
        commands += ["-stroke", "black", "-draw", "line %d, %d, %d, %d" % (startX+1, startY+1, endX+1, endY+1)]
    if final_coordinates:
        # The last point never starts a pair above; draw its circle here.
        lastX, lastY = _screenToWindow(final_coordinates[-1][0], final_coordinates[-1][1])
        commands += ["-fill", "blue", "-stroke", "red", "-draw",
                     "fill-opacity 0.2 circle %d, %d %d, %d" % (lastX, lastY, lastX-5, lastY-5)]
    _runDrawCmd(inputfilename, commands, outputfilename)
def evaluatePreprocessFilter(imageFilename, ppfilter, words):
    """
    Visualise how given words are detected from given image file when
    using given preprocessing filter.

    Converts the image with the given ImageMagick filter string, OCRs
    the result with tesseract, and scores how well each word in words
    was detected. Results are accumulated into
    evaluatePreprocessFilter.scores, and the best filter seen so far is
    stored as the default preprocessing filter (_g_preprocess). Returns
    without recording anything if some word was not detected at all.
    """
    global _g_preprocess
    evaluatePreprocessFilter.count += 1
    preprocessed_filename = '%s-pre%s.png' % (imageFilename, evaluatePreprocessFilter.count)
    # NOTE(review): assumes tesseract writes its hocr output to
    # "eyenfinger.autoconfigure.html" in the working directory - confirm
    # for the tesseract version in use.
    _runcmd("convert '%s' %s '%s' && tesseract %s eyenfinger.autoconfigure hocr" %
            (imageFilename, ppfilter, preprocessed_filename,
             preprocessed_filename))
    detected_words = _hocr2words(open("eyenfinger.autoconfigure.html").read())
    scored_words = []
    for w in words:
        try:
            score, word = findWord(w, detected_words)
        except BadMatch:
            # A word was not detected at all: this filter is unusable.
            return
        scored_words.append((score, word, w))
    scored_words.sort()
    avg_score = sum([s[0] for s in scored_words])/float(len(scored_words))
    # Rank filters by worst score + average score (scored_words is sorted
    # ascending, so scored_words[0][0] is the worst score).
    evaluatePreprocessFilter.scores.append( (scored_words[0][0] + avg_score, scored_words[0][0], avg_score, ppfilter) )
    evaluatePreprocessFilter.scores.sort()
    # set the best preprocess filter so far as a default
    _g_preprocess = evaluatePreprocessFilter.scores[-1][-1]
    drawWords(preprocessed_filename, preprocessed_filename, words, detected_words)
    sys.stdout.write("%.2f %s %s %s\n" % (sum([s[0] for s in scored_words])/float(len(scored_words)), scored_words[0], preprocessed_filename, ppfilter))
    sys.stdout.flush()
# Call counter and accumulated (rank, worstScore, avgScore, filter) results.
evaluatePreprocessFilter.count = 0
evaluatePreprocessFilter.scores = []
def autoconfigure(imageFilename, words):
    """
    Search for an image preprocessing configuration that maximises the
    score of finding the given words in the image.

    Every candidate pipeline is evaluated with
    evaluatePreprocessFilter, which keeps the best configuration seen so
    far as the default preprocessing filter (_g_preprocess).
    """
    # Read image dimensions; resize widths are multiples of the width.
    iUseImageAsWindow(imageFilename)
    image_width = _g_windowSizes[_g_lastWindow][0]
    resize_filters = ['Mitchell', 'Catrom', 'Hermite', 'Gaussian']
    levels = [(20, 20), (50, 50), (80, 80), (5, 5), (95, 95),
              (30, 30), (40, 40), (60, 60), (70, 70), (60, 60),
              (10, 30), (30, 50), (50, 70), (70, 90), (80, 100)]
    zoom = [1, 2]
    # Candidate pipelines: %(b)s and %(w)s are black/white level
    # percentages, %(f)s the resize filter, %(r)s the resize width in
    # pixels.
    pipelines = [
        "-sharpen 5 -level %(b)s%%,%(w)s%%,3.0 -sharpen 5",
        "-sharpen 5 -filter %(f)s -resize %(r)sx -sharpen 5 -level %(b)s%%,%(w)s%%,3.0 -sharpen 5",
        "-sharpen 5 -filter %(f)s -resize %(r)sx -level %(b)s%%,%(w)s%%,3.0 -sharpen 5",
        "-sharpen 5 -filter %(f)s -resize %(r)sx -level %(b)s%%,%(w)s%%,3.0",
        "-sharpen 5 -level %(b)s%%,%(w)s%%,3.0 -filter %(f)s -resize %(r)sx -sharpen 5",
        "-sharpen 5 -level %(b)s%%,%(w)s%%,1.0 -filter %(f)s -resize %(r)sx",
        "-sharpen 5 -level %(b)s%%,%(w)s%%,10.0 -filter %(f)s -resize %(r)sx",
        ]
    for f in resize_filters:
        for z in zoom:
            for blevel, wlevel in levels:
                values = {"b": blevel, "w": wlevel,
                          "f": f, "r": z * image_width}
                for pipeline in pipelines:
                    evaluatePreprocessFilter(imageFilename,
                                             pipeline % values, words)
| mixu-/fMBT | utils3/eyenfinger.py | Python | lgpl-2.1 | 71,456 | [
"Gaussian"
] | 8d188439771fc9e610379b6023415ff0de76ac7d7f11d5ca2b09556798cfba1d |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for methods relating to sending emails."""
import datetime
import types
from core.domain import config_services
from core.domain import email_manager
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
(email_models,) = models.Registry.import_models([models.NAMES.email])
class EmailRightsTest(test_utils.GenericTestBase):
    """Test that only certain users can send certain types of emails."""

    def setUp(self):
        """Create editor, moderator and admin users for the rights checks."""
        super(EmailRightsTest, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        self.moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL)
        self.set_moderators([self.MODERATOR_USERNAME])
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.set_admins([self.ADMIN_USERNAME])

    def test_sender_id_validation(self):
        """Check _require_sender_id_is_valid for every (intent, sender) pair
        and for null/invalid intent strings."""
        sender_ids_to_test = [
            feconf.SYSTEM_COMMITTER_ID, self.admin_id, self.moderator_id,
            self.editor_id]
        # These are given in the order of sender_ids_to_test.
        expected_validation_results = {
            feconf.EMAIL_INTENT_SIGNUP: (True, False, False, False),
            feconf.EMAIL_INTENT_DAILY_BATCH: (True, False, False, False),
            feconf.EMAIL_INTENT_MARKETING: (False, True, False, False),
            feconf.EMAIL_INTENT_PUBLICIZE_EXPLORATION: (
                False, True, True, False),
            feconf.EMAIL_INTENT_UNPUBLISH_EXPLORATION: (
                False, True, True, False),
            feconf.EMAIL_INTENT_DELETE_EXPLORATION: (
                False, True, True, False),
        }
        # The validator is private; accessing it is deliberate here.
        # pylint: disable=protected-access
        for intent in expected_validation_results:
            for ind, sender_id in enumerate(sender_ids_to_test):
                if expected_validation_results[intent][ind]:
                    # Expected to pass without raising.
                    email_manager._require_sender_id_is_valid(
                        intent, sender_id)
                else:
                    with self.assertRaisesRegexp(
                        Exception, 'Invalid sender_id'
                    ):
                        email_manager._require_sender_id_is_valid(
                            intent, sender_id)
        # Also test null and invalid intent strings.
        with self.assertRaisesRegexp(Exception, 'Invalid email intent string'):
            email_manager._require_sender_id_is_valid(
                '', feconf.SYSTEM_COMMITTER_ID)
        with self.assertRaisesRegexp(Exception, 'Invalid email intent string'):
            email_manager._require_sender_id_is_valid(
                '', self.admin_id)
        with self.assertRaisesRegexp(Exception, 'Invalid email intent string'):
            email_manager._require_sender_id_is_valid(
                'invalid_intent', feconf.SYSTEM_COMMITTER_ID)
        with self.assertRaisesRegexp(Exception, 'Invalid email intent string'):
            email_manager._require_sender_id_is_valid(
                'invalid_intent', self.admin_id)
        # pylint: enable=protected-access
class ExplorationMembershipEmailTests(test_utils.GenericTestBase):
    """Tests that sending exploration membership email works as expected."""

    EXPLORATION_TITLE = 'Title'

    def setUp(self):
        """Creates an editor and a new user, an exploration owned by the
        editor, and swap contexts that enable role-notification emails.
        """
        super(ExplorationMembershipEmailTests, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
        self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
        self.exploration = self.save_new_default_exploration(
            'A', self.editor_id, self.EXPLORATION_TITLE)
        self.expected_email_subject = (
            '%s - invitation to collaborate') % self.EXPLORATION_TITLE
        # Context managers that switch the email feature flags on for the
        # duration of a 'with' block.
        self.can_send_emails_ctx = self.swap(
            feconf, 'CAN_SEND_EMAILS', True)
        self.can_send_editor_role_email_ctx = self.swap(
            feconf, 'CAN_SEND_EDITOR_ROLE_EMAILS', True)

    def test_role_email_is_sent_when_editor_assigns_role(self):
        """Granting editor rights via the rights handler sends exactly one
        email to the new member.
        """
        with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
            self.login(self.EDITOR_EMAIL)
            response = self.testapp.get('%s/%s' % (
                feconf.EDITOR_URL_PREFIX, self.exploration.id))
            csrf_token = self.get_csrf_token_from_response(response)
            self.put_json('%s/%s' % (
                feconf.EXPLORATION_RIGHTS_PREFIX, self.exploration.id), {
                    'version': self.exploration.version,
                    'new_member_username': self.NEW_USER_USERNAME,
                    'new_member_role': rights_manager.ROLE_EDITOR,
                }, csrf_token=csrf_token)
            messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
            self.assertEqual(len(messages), 1)

    def test_email_is_not_sent_if_recipient_has_declined_such_emails(self):
        """No email is sent when the recipient has opted out of such
        emails in their preferences.
        """
        # NOTE(review): the booleans presumably map to the individual email
        # preference flags, with editor-role emails turned off -- confirm
        # the argument order against user_services.update_email_preferences.
        user_services.update_email_preferences(
            self.new_user_id, True, False, False, False)
        with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
            email_manager.send_role_notification_email(
                self.editor_id, self.new_user_id, rights_manager.ROLE_OWNER,
                self.exploration.id, self.exploration.title)
            messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
            self.assertEqual(len(messages), 0)

    def test_role_emails_sent_are_correct(self):
        """Checks the metadata recorded in SentEmailModel for a role
        notification email (recipient, sender, intent and subject).
        """
        with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
            email_manager.send_role_notification_email(
                self.editor_id, self.new_user_id, rights_manager.ROLE_VIEWER,
                self.exploration.id, self.exploration.title)

            messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
            self.assertEqual(len(messages), 1)

            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 1)

            sent_email_model = all_models[0]

            # Check that email details are correct.
            self.assertEqual(
                sent_email_model.recipient_id,
                self.new_user_id)
            self.assertEqual(
                sent_email_model.recipient_email, self.NEW_USER_EMAIL)
            # The stored sender_id is the system committer, while the
            # visible sender email carries the editor's username.
            self.assertEqual(
                sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
            self.assertEqual(
                sent_email_model.sender_email,
                '%s <%s>' % (
                    self.EDITOR_USERNAME, feconf.NOREPLY_EMAIL_ADDRESS))
            self.assertEqual(
                sent_email_model.intent,
                feconf.EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION)
            self.assertEqual(
                sent_email_model.subject,
                self.expected_email_subject)

    def test_correct_rights_are_written_in_manager_role_email_body(self):
        """The manager (owner) role email lists all three rights: change
        permissions, edit, and playtest.
        """
        expected_email_html_body = (
            'Hi newuser,<br>'
            '<br>'
            '<b>editor</b> has granted you manager rights to their '
            'exploration, '
            '"<a href="http://www.oppia.org/create/A">Title</a>", '
            'on Oppia.org.<br>'
            '<br>'
            'This allows you to:<br>'
            '<ul>'
            '<li>Change the exploration permissions</li><br>'
            '<li>Edit the exploration</li><br>'
            '<li>View and playtest the exploration</li><br>'
            '</ul>'
            'You can find the exploration '
            '<a href="http://www.oppia.org/create/A">here</a>.<br>'
            '<br>'
            'Thanks, and happy collaborating!<br>'
            '<br>'
            'Best wishes,<br>'
            'The Oppia Team<br>'
            '<br>'
            'You can change your email preferences via the '
            '<a href="https://www.example.com">Preferences</a> page.')

        expected_email_text_body = (
            'Hi newuser,\n'
            '\n'
            'editor has granted you manager rights to their '
            'exploration, "Title", on Oppia.org.\n'
            '\n'
            'This allows you to:\n'
            '- Change the exploration permissions\n'
            '- Edit the exploration\n'
            '- View and playtest the exploration\n'
            'You can find the exploration here.\n'
            '\n'
            'Thanks, and happy collaborating!\n'
            '\n'
            'Best wishes,\n'
            'The Oppia Team\n'
            '\n'
            'You can change your email preferences via the Preferences page.')

        with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
            # Check that correct email content is sent for Manager.
            email_manager.send_role_notification_email(
                self.editor_id, self.new_user_id, rights_manager.ROLE_OWNER,
                self.exploration.id, self.exploration.title)

            messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
            self.assertEqual(len(messages), 1)

            self.assertEqual(
                messages[0].html.decode(),
                expected_email_html_body)
            self.assertEqual(
                messages[0].body.decode(),
                expected_email_text_body)

    def test_correct_rights_are_written_in_editor_role_email_body(self):
        """The editor role email lists only the edit and playtest rights."""
        expected_email_html_body = (
            'Hi newuser,<br>'
            '<br>'
            '<b>editor</b> has granted you editor rights to their '
            'exploration, '
            '"<a href="http://www.oppia.org/create/A">Title</a>"'
            ', on Oppia.org.<br>'
            '<br>'
            'This allows you to:<br>'
            '<ul>'
            '<li>Edit the exploration</li><br>'
            '<li>View and playtest the exploration</li><br>'
            '</ul>'
            'You can find the exploration '
            '<a href="http://www.oppia.org/create/A">here</a>.<br>'
            '<br>'
            'Thanks, and happy collaborating!<br>'
            '<br>'
            'Best wishes,<br>'
            'The Oppia Team<br>'
            '<br>'
            'You can change your email preferences via the '
            '<a href="https://www.example.com">Preferences</a> page.')

        expected_email_text_body = (
            'Hi newuser,\n'
            '\n'
            'editor has granted you editor rights to their '
            'exploration, "Title", on Oppia.org.\n'
            '\n'
            'This allows you to:\n'
            '- Edit the exploration\n'
            '- View and playtest the exploration\n'
            'You can find the exploration here.\n'
            '\n'
            'Thanks, and happy collaborating!\n'
            '\n'
            'Best wishes,\n'
            'The Oppia Team\n'
            '\n'
            'You can change your email preferences via the Preferences page.')

        with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
            # Check that correct email content is sent for Editor.
            email_manager.send_role_notification_email(
                self.editor_id, self.new_user_id, rights_manager.ROLE_EDITOR,
                self.exploration.id, self.exploration.title)

            messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
            self.assertEqual(len(messages), 1)

            self.assertEqual(
                messages[0].html.decode(),
                expected_email_html_body)
            self.assertEqual(
                messages[0].body.decode(),
                expected_email_text_body)

    def test_correct_rights_are_written_in_playtester_role_email_body(self):
        """The playtester (viewer) role email lists only the playtest
        right.
        """
        expected_email_html_body = (
            'Hi newuser,<br>'
            '<br>'
            '<b>editor</b> has granted you playtest access to their '
            'exploration, '
            '"<a href="http://www.oppia.org/create/A">Title</a>"'
            ', on Oppia.org.<br>'
            '<br>'
            'This allows you to:<br>'
            '<ul>'
            '<li>View and playtest the exploration</li><br>'
            '</ul>'
            'You can find the exploration '
            '<a href="http://www.oppia.org/create/A">here</a>.<br>'
            '<br>'
            'Thanks, and happy collaborating!<br>'
            '<br>'
            'Best wishes,<br>'
            'The Oppia Team<br>'
            '<br>'
            'You can change your email preferences via the '
            '<a href="https://www.example.com">Preferences</a> page.')

        expected_email_text_body = (
            'Hi newuser,\n'
            '\n'
            'editor has granted you playtest access to their '
            'exploration, "Title", on Oppia.org.\n'
            '\n'
            'This allows you to:\n'
            '- View and playtest the exploration\n'
            'You can find the exploration here.\n'
            '\n'
            'Thanks, and happy collaborating!\n'
            '\n'
            'Best wishes,\n'
            'The Oppia Team\n'
            '\n'
            'You can change your email preferences via the Preferences page.')

        with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
            # Check that correct email content is sent for Playtester.
            email_manager.send_role_notification_email(
                self.editor_id, self.new_user_id, rights_manager.ROLE_VIEWER,
                self.exploration.id, self.exploration.title)

            messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
            self.assertEqual(len(messages), 1)

            self.assertEqual(
                messages[0].html.decode(),
                expected_email_html_body)
            self.assertEqual(
                messages[0].body.decode(),
                expected_email_text_body)

    def test_correct_undefined_role_raises_an_exception(self):
        """Sending a role email with an unrecognized role raises an
        'Invalid role' exception.
        """
        with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
            # Check that an exception is raised when an invalid
            # role is supplied.
            with self.assertRaisesRegexp(Exception, 'Invalid role'):
                email_manager.send_role_notification_email(
                    self.editor_id, self.new_user_id, rights_manager.ROLE_NONE,
                    self.exploration.id, self.exploration.title)
class SignupEmailTests(test_utils.GenericTestBase):
    """Test that signup-email sending functionality works as expected."""

    def setUp(self):
        """Creates an admin and the config values (footer, email content)
        used by the signup-email tests, plus the expected rendered bodies.
        """
        super(SignupEmailTests, self).setUp()

        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.set_admins([self.ADMIN_USERNAME])

        # Values assigned to the admin-configurable email properties.
        self.new_footer = (
            'Unsubscribe from emails at your '
            '<a href="https://www.site.com/prefs">Preferences page</a>.')
        self.new_email_content = {
            'subject': 'Welcome!',
            'html_body': (
                'Here is some HTML text.<br>'
                'With a <b>bold</b> bit and an <i>italic</i> bit.<br>')
        }
        # Plain-text rendering: tags stripped, <br> becomes a newline.
        self.expected_text_email_content = (
            'Hi editor,\n'
            '\n'
            'Here is some HTML text.\n'
            'With a bold bit and an italic bit.\n'
            '\n'
            '\n'
            'Unsubscribe from emails at your Preferences page.')
        self.expected_html_email_content = (
            'Hi editor,<br>'
            '<br>'
            'Here is some HTML text.<br>'
            'With a <b>bold</b> bit and an <i>italic</i> bit.<br>'
            '<br>'
            '<br>'
            'Unsubscribe from emails at your '
            '<a href="https://www.site.com/prefs">Preferences page</a>.')

    def test_email_not_sent_if_config_does_not_permit_it(self):
        """No signup email is sent while CAN_SEND_EMAILS is False, even
        with the content config fully set.
        """
        with self.swap(feconf, 'CAN_SEND_EMAILS', False):
            config_services.set_property(
                self.admin_id, email_manager.EMAIL_FOOTER.name,
                self.new_footer)
            config_services.set_property(
                self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name,
                self.new_email_content)

            self.login(self.EDITOR_EMAIL)
            response = self.testapp.get(feconf.SIGNUP_URL)
            csrf_token = self.get_csrf_token_from_response(response)

            self.post_json(feconf.SIGNUP_DATA_URL, {
                'agreed_to_terms': True,
                'username': self.EDITOR_USERNAME
            }, csrf_token=csrf_token)

            # Check that no email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
            self.assertEqual(0, len(messages))

    def test_email_not_sent_if_content_config_is_not_modified(self):
        """If SIGNUP_EMAIL_CONTENT is left at its default, signup succeeds
        for the user but an error is logged and no email is sent.
        """
        can_send_emails_ctx = self.swap(
            feconf, 'CAN_SEND_EMAILS', True)

        logged_errors = []

        # Captures errors instead of writing them to the real log.
        def _log_error_for_tests(error_message):
            logged_errors.append(error_message)

        log_new_error_counter = test_utils.CallCounter(_log_error_for_tests)
        log_new_error_ctx = self.swap(
            email_manager, 'log_new_error', log_new_error_counter)

        with can_send_emails_ctx, log_new_error_ctx:
            self.assertEqual(log_new_error_counter.times_called, 0)

            self.login(self.EDITOR_EMAIL)
            response = self.testapp.get(feconf.SIGNUP_URL)
            csrf_token = self.get_csrf_token_from_response(response)

            # No user-facing error should surface.
            self.post_json(feconf.SIGNUP_DATA_URL, {
                'agreed_to_terms': True,
                'username': self.EDITOR_USERNAME
            }, csrf_token=csrf_token)

            # However, an error should be recorded in the logs.
            self.assertEqual(log_new_error_counter.times_called, 1)
            self.assertEqual(
                logged_errors[0],
                'Please ensure that the value for the admin config property '
                'SIGNUP_EMAIL_CONTENT is set, before allowing post-signup '
                'emails to be sent.')

            # Check that no email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
            self.assertEqual(0, len(messages))

    def test_email_not_sent_if_content_config_is_partially_modified(self):
        """A content config with only the body changed (subject still at
        its default) is treated as unset: error logged, no email sent.
        """
        can_send_emails_ctx = self.swap(
            feconf, 'CAN_SEND_EMAILS', True)

        config_services.set_property(
            self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name, {
                'subject': (
                    email_manager.SIGNUP_EMAIL_CONTENT.default_value[
                        'subject']),
                'html_body': 'New HTML body.',
            })

        logged_errors = []

        # Captures errors instead of writing them to the real log.
        def _log_error_for_tests(error_message):
            logged_errors.append(error_message)

        log_new_error_counter = test_utils.CallCounter(_log_error_for_tests)
        log_new_error_ctx = self.swap(
            email_manager, 'log_new_error', log_new_error_counter)

        with can_send_emails_ctx, log_new_error_ctx:
            self.assertEqual(log_new_error_counter.times_called, 0)

            self.login(self.EDITOR_EMAIL)
            response = self.testapp.get(feconf.SIGNUP_URL)
            csrf_token = self.get_csrf_token_from_response(response)

            # No user-facing error should surface.
            self.post_json(feconf.SIGNUP_DATA_URL, {
                'agreed_to_terms': True,
                'username': self.EDITOR_USERNAME
            }, csrf_token=csrf_token)

            # However, an error should be recorded in the logs.
            self.assertEqual(log_new_error_counter.times_called, 1)
            self.assertEqual(
                logged_errors[0],
                'Please ensure that the value for the admin config property '
                'SIGNUP_EMAIL_CONTENT is set, before allowing post-signup '
                'emails to be sent.')

            # Check that no email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
            self.assertEqual(0, len(messages))

    def test_email_with_bad_content_is_not_sent(self):
        """HTML content that does not survive sanitization (here, a
        <script> tag) causes a logged error and no email.
        """
        can_send_emails_ctx = self.swap(
            feconf, 'CAN_SEND_EMAILS', True)

        config_services.set_property(
            self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name, {
                'subject': 'New email subject',
                'html_body': 'New HTML body.<script>alert(3);</script>',
            })

        logged_errors = []

        # Captures errors instead of writing them to the real log.
        def _log_error_for_tests(error_message):
            logged_errors.append(error_message)

        log_new_error_counter = test_utils.CallCounter(_log_error_for_tests)
        log_new_error_ctx = self.swap(
            email_manager, 'log_new_error', log_new_error_counter)

        with can_send_emails_ctx, log_new_error_ctx:
            self.assertEqual(log_new_error_counter.times_called, 0)

            self.login(self.EDITOR_EMAIL)
            response = self.testapp.get(feconf.SIGNUP_URL)
            csrf_token = self.get_csrf_token_from_response(response)

            # No user-facing error should surface.
            self.post_json(feconf.SIGNUP_DATA_URL, {
                'agreed_to_terms': True,
                'username': self.EDITOR_USERNAME
            }, csrf_token=csrf_token)

            # However, an error should be recorded in the logs.
            self.assertEqual(log_new_error_counter.times_called, 1)
            self.assertTrue(logged_errors[0].startswith(
                'Original email HTML body does not match cleaned HTML body'))

            # Check that no email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
            self.assertEqual(0, len(messages))

    def test_contents_of_signup_email_are_correct(self):
        """With footer, content and sender name all configured, the signup
        email's sender, recipient, subject and both bodies are correct.
        """
        with self.swap(feconf, 'CAN_SEND_EMAILS', True):
            config_services.set_property(
                self.admin_id, email_manager.EMAIL_FOOTER.name,
                self.new_footer)
            config_services.set_property(
                self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name,
                self.new_email_content)
            config_services.set_property(
                self.admin_id, email_manager.EMAIL_SENDER_NAME.name,
                'Email Sender')

            self.login(self.EDITOR_EMAIL)
            response = self.testapp.get(feconf.SIGNUP_URL)
            csrf_token = self.get_csrf_token_from_response(response)

            self.post_json(feconf.SIGNUP_DATA_URL, {
                'agreed_to_terms': True,
                'username': self.EDITOR_USERNAME
            }, csrf_token=csrf_token)

            # Check that an email was sent with the correct content.
            messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
            self.assertEqual(1, len(messages))

            self.assertEqual(
                messages[0].sender,
                'Email Sender <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
            self.assertEqual(messages[0].to, self.EDITOR_EMAIL)
            self.assertEqual(messages[0].subject, 'Welcome!')
            self.assertEqual(
                messages[0].body.decode(), self.expected_text_email_content)
            self.assertEqual(
                messages[0].html.decode(), self.expected_html_email_content)

    def test_email_only_sent_once_for_repeated_signups_by_same_user(self):
        """A second signup POST from the same user does not produce a
        second email.
        """
        with self.swap(feconf, 'CAN_SEND_EMAILS', True):
            config_services.set_property(
                self.admin_id, email_manager.EMAIL_FOOTER.name,
                self.new_footer)
            config_services.set_property(
                self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name,
                self.new_email_content)

            self.login(self.EDITOR_EMAIL)
            response = self.testapp.get(feconf.SIGNUP_URL)
            csrf_token = self.get_csrf_token_from_response(response)

            self.post_json(feconf.SIGNUP_DATA_URL, {
                'agreed_to_terms': True,
                'username': self.EDITOR_USERNAME
            }, csrf_token=csrf_token)

            # Check that an email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
            self.assertEqual(1, len(messages))

            # Send a second POST request.
            self.post_json(feconf.SIGNUP_DATA_URL, {
                'agreed_to_terms': True,
                'username': self.EDITOR_USERNAME
            }, csrf_token=csrf_token)

            # Check that no new email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
            self.assertEqual(1, len(messages))

    def test_email_only_sent_if_signup_was_successful(self):
        """A rejected signup (invalid username, HTTP 400) sends nothing;
        a subsequent valid signup sends the email.
        """
        with self.swap(feconf, 'CAN_SEND_EMAILS', True):
            config_services.set_property(
                self.admin_id, email_manager.EMAIL_FOOTER.name,
                self.new_footer)
            config_services.set_property(
                self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name,
                self.new_email_content)

            self.login(self.EDITOR_EMAIL)
            response = self.testapp.get(feconf.SIGNUP_URL)
            csrf_token = self.get_csrf_token_from_response(response)

            self.post_json(
                feconf.SIGNUP_DATA_URL,
                {
                    'agreed_to_terms': True,
                    'username': 'BadUsername!!!'
                },
                csrf_token=csrf_token,
                expect_errors=True,
                expected_status_int=400)

            # Check that no email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
            self.assertEqual(0, len(messages))

            # Redo the signup process with a good username.
            self.post_json(feconf.SIGNUP_DATA_URL, {
                'agreed_to_terms': True,
                'username': self.EDITOR_USERNAME
            }, csrf_token=csrf_token)

            # Check that a new email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
            self.assertEqual(1, len(messages))

    def test_record_of_sent_email_is_written_to_datastore(self):
        """Every sent signup email leaves a SentEmailModel whose fields
        mirror the message that was sent.
        """
        with self.swap(feconf, 'CAN_SEND_EMAILS', True):
            config_services.set_property(
                self.admin_id, email_manager.EMAIL_FOOTER.name,
                self.new_footer)
            config_services.set_property(
                self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name,
                self.new_email_content)
            config_services.set_property(
                self.admin_id, email_manager.EMAIL_SENDER_NAME.name,
                'Email Sender')

            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 0)

            self.login(self.EDITOR_EMAIL)
            response = self.testapp.get(feconf.SIGNUP_URL)
            csrf_token = self.get_csrf_token_from_response(response)

            self.post_json(feconf.SIGNUP_DATA_URL, {
                'agreed_to_terms': True,
                'username': self.EDITOR_USERNAME
            }, csrf_token=csrf_token)

            # Check that a new email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
            self.assertEqual(1, len(messages))

            # Check that the content of this email was recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 1)

            # Check that the contents of the model are correct.
            sent_email_model = all_models[0]

            self.assertEqual(
                sent_email_model.recipient_id,
                self.get_user_id_from_email(self.EDITOR_EMAIL))
            self.assertEqual(
                sent_email_model.recipient_email, self.EDITOR_EMAIL)
            self.assertEqual(
                sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
            self.assertEqual(
                sent_email_model.sender_email,
                'Email Sender <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
            self.assertEqual(
                sent_email_model.intent, feconf.EMAIL_INTENT_SIGNUP)
            self.assertEqual(
                sent_email_model.subject, 'Welcome!')
            self.assertEqual(
                sent_email_model.html_body, self.expected_html_email_content)
class DuplicateEmailTests(test_utils.GenericTestBase):
    """Test that duplicate emails are not sent."""

    def setUp(self):
        """Creates a new user and an admin, the config values used below,
        and a swap that makes SentEmailModel._generate_hash return a fixed
        value so every email collides on the same hash.
        """
        super(DuplicateEmailTests, self).setUp()

        self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
        self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.set_admins([self.ADMIN_USERNAME])

        self.new_footer = (
            'Unsubscribe from emails at your '
            '<a href="https://www.site.com/prefs">Preferences page</a>.')
        self.new_email_content = {
            'subject': 'Welcome!',
            'html_body': (
                'Here is some HTML text.<br>'
                'With a <b>bold</b> bit and an <i>italic</i> bit.<br>')
        }

        # pylint: disable=unused-argument
        def _generate_hash_for_tests(
                cls, recipient_id, email_subject, email_body):
            """Returns a constant hash, regardless of the email contents."""
            return 'Email Hash'

        # Bind the stub onto the model class so all instances share it.
        self.generate_hash_ctx = self.swap(
            email_models.SentEmailModel, '_generate_hash',
            types.MethodType(_generate_hash_for_tests,
                             email_models.SentEmailModel))

    def test_send_email_does_not_resend_if_same_hash_exists(self):
        """An identical email already recorded within the duplicate
        interval is not re-sent; a 'Duplicate email' error is logged.
        """
        can_send_emails_ctx = self.swap(
            feconf, 'CAN_SEND_EMAILS', True)
        duplicate_email_ctx = self.swap(
            feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 1000)

        logged_errors = []

        # Captures errors instead of writing them to the real log.
        def _log_error_for_tests(error_message):
            logged_errors.append(error_message)

        log_new_error_counter = test_utils.CallCounter(_log_error_for_tests)
        log_new_error_ctx = self.swap(
            email_manager, 'log_new_error', log_new_error_counter)

        with can_send_emails_ctx, duplicate_email_ctx, log_new_error_ctx:
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 0)

            # Pre-record an email identical to the one sent below.
            email_models.SentEmailModel.create(
                self.new_user_id, self.NEW_USER_EMAIL,
                feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS,
                feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body',
                datetime.datetime.utcnow())

            # Check that the content of this email was recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 1)

            # pylint: disable=protected-access
            email_manager._send_email(
                self.new_user_id, feconf.SYSTEM_COMMITTER_ID,
                feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body',
                feconf.SYSTEM_EMAIL_ADDRESS)
            # pylint: enable=protected-access

            # An error should be recorded in the logs.
            self.assertEqual(log_new_error_counter.times_called, 1)
            self.assertRegexpMatches(logged_errors[0], 'Duplicate email')

            # Check that a new email was not sent.
            messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
            self.assertEqual(0, len(messages))

            # Check that the content of this email was not recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 1)

    def test_send_email_does_not_resend_within_duplicate_interval(self):
        """The first _send_email goes out; an identical one within
        DUPLICATE_EMAIL_INTERVAL_MINS is suppressed with a logged error.
        """
        can_send_emails_ctx = self.swap(
            feconf, 'CAN_SEND_EMAILS', True)
        duplicate_email_ctx = self.swap(
            feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2)

        logged_errors = []

        # Captures errors instead of writing them to the real log.
        def _log_error_for_tests(error_message):
            logged_errors.append(error_message)

        log_new_error_counter = test_utils.CallCounter(_log_error_for_tests)
        log_new_error_ctx = self.swap(
            email_manager, 'log_new_error', log_new_error_counter)

        with can_send_emails_ctx, duplicate_email_ctx, log_new_error_ctx:
            config_services.set_property(
                self.admin_id, email_manager.EMAIL_SENDER_NAME.name,
                'Email Sender')

            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 0)

            # pylint: disable=protected-access
            email_manager._send_email(
                self.new_user_id, feconf.SYSTEM_COMMITTER_ID,
                feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body',
                feconf.SYSTEM_EMAIL_ADDRESS)

            # Check that a new email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
            self.assertEqual(1, len(messages))

            # Check that the content of this email was recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 1)

            # No error should be recorded in the logs.
            self.assertEqual(log_new_error_counter.times_called, 0)

            email_manager._send_email(
                self.new_user_id, feconf.SYSTEM_COMMITTER_ID,
                feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body',
                feconf.SYSTEM_EMAIL_ADDRESS)
            # pylint: enable=protected-access

            # An error should be recorded in the logs.
            self.assertEqual(log_new_error_counter.times_called, 1)
            self.assertRegexpMatches(logged_errors[0], 'Duplicate email')

            # Check that a new email was not sent.
            messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
            self.assertEqual(1, len(messages))

            # Check that the content of this email was not recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 1)

    def test_sending_email_with_different_recipient_but_same_hash(self):
        """Hash for both messages is same but recipients are different."""
        can_send_emails_ctx = self.swap(
            feconf, 'CAN_SEND_EMAILS', True)
        duplicate_email_ctx = self.swap(
            feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2)

        # generate_hash_ctx forces both emails onto the same hash; the
        # differing recipient must still let the second one through.
        with can_send_emails_ctx, duplicate_email_ctx, self.generate_hash_ctx:
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 0)

            email_models.SentEmailModel.create(
                'recipient_id', self.NEW_USER_EMAIL,
                feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS,
                feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body',
                datetime.datetime.utcnow())

            # Check that the content of this email was recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 1)

            # pylint: disable=protected-access
            email_manager._send_email(
                self.new_user_id, feconf.SYSTEM_COMMITTER_ID,
                feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body',
                feconf.SYSTEM_EMAIL_ADDRESS)
            # pylint: enable=protected-access

            # Check that a new email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
            self.assertEqual(1, len(messages))

            # Check that the content of this email was recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 2)

            # Check that the contents of the model are correct.
            sent_email_model1 = all_models[0]
            sent_email_model2 = all_models[1]

            self.assertEqual(
                sent_email_model1.email_hash, sent_email_model2.email_hash)
            self.assertNotEqual(
                sent_email_model1.recipient_id, sent_email_model2.recipient_id)
            self.assertEqual(
                sent_email_model1.subject, sent_email_model2.subject)
            self.assertEqual(
                sent_email_model1.html_body, sent_email_model2.html_body)

    def test_sending_email_with_different_subject_but_same_hash(self):
        """Hash for both messages is same but subjects are different."""
        can_send_emails_ctx = self.swap(
            feconf, 'CAN_SEND_EMAILS', True)
        duplicate_email_ctx = self.swap(
            feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2)

        # Same forced hash; the differing subject must still let the
        # second email through.
        with can_send_emails_ctx, duplicate_email_ctx, self.generate_hash_ctx:
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 0)

            email_models.SentEmailModel.create(
                self.new_user_id, self.NEW_USER_EMAIL,
                feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS,
                feconf.EMAIL_INTENT_SIGNUP, 'Email Subject1', 'Email Body',
                datetime.datetime.utcnow())

            # Check that the content of this email was recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 1)

            # pylint: disable=protected-access
            email_manager._send_email(
                self.new_user_id, feconf.SYSTEM_COMMITTER_ID,
                feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body',
                feconf.SYSTEM_EMAIL_ADDRESS)
            # pylint: enable=protected-access

            # Check that a new email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
            self.assertEqual(1, len(messages))

            # Check that the content of this email was recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 2)

            # Check that the contents of the model are correct.
            sent_email_model1 = all_models[0]
            sent_email_model2 = all_models[1]

            self.assertEqual(
                sent_email_model1.email_hash, sent_email_model2.email_hash)
            self.assertEqual(
                sent_email_model1.recipient_id, sent_email_model2.recipient_id)
            self.assertNotEqual(
                sent_email_model1.subject, sent_email_model2.subject)
            self.assertEqual(
                sent_email_model1.html_body, sent_email_model2.html_body)

    def test_sending_email_with_different_body_but_same_hash(self):
        """Hash for both messages is same but body is different."""
        can_send_emails_ctx = self.swap(
            feconf, 'CAN_SEND_EMAILS', True)
        duplicate_email_ctx = self.swap(
            feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2)

        # Same forced hash; the differing body must still let the second
        # email through.
        with can_send_emails_ctx, duplicate_email_ctx, self.generate_hash_ctx:
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 0)

            email_models.SentEmailModel.create(
                self.new_user_id, self.NEW_USER_EMAIL,
                feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS,
                feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body1',
                datetime.datetime.utcnow())

            # Check that the content of this email was recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 1)

            # pylint: disable=protected-access
            email_manager._send_email(
                self.new_user_id, feconf.SYSTEM_COMMITTER_ID,
                feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body',
                feconf.SYSTEM_EMAIL_ADDRESS)
            # pylint: enable=protected-access

            # Check that a new email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
            self.assertEqual(1, len(messages))

            # Check that the content of this email was recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 2)

            # Check that the contents of the model are correct.
            sent_email_model1 = all_models[0]
            sent_email_model2 = all_models[1]

            self.assertEqual(
                sent_email_model1.email_hash, sent_email_model2.email_hash)
            self.assertEqual(
                sent_email_model1.recipient_id, sent_email_model2.recipient_id)
            self.assertEqual(
                sent_email_model1.subject, sent_email_model2.subject)
            self.assertNotEqual(
                sent_email_model1.html_body, sent_email_model2.html_body)

    def test_duplicate_emails_are_sent_after_some_time_has_elapsed(self):
        """Identical prior emails older than the duplicate interval
        (here 4 and 2+ minutes ago, vs. a 2-minute interval) do not block
        a fresh send.
        """
        can_send_emails_ctx = self.swap(
            feconf, 'CAN_SEND_EMAILS', True)
        duplicate_email_ctx = self.swap(
            feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2)

        with can_send_emails_ctx, duplicate_email_ctx:
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 0)

            # Record an identical email sent 4 minutes ago.
            email_sent_time = (datetime.datetime.utcnow() -
                               datetime.timedelta(minutes=4))

            email_models.SentEmailModel.create(
                self.new_user_id, self.NEW_USER_EMAIL,
                feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS,
                feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body',
                email_sent_time)

            # Check that the content of this email was recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 1)

            # Record another identical email sent 2 minutes ago.
            email_sent_time = (datetime.datetime.utcnow() -
                               datetime.timedelta(minutes=2))

            email_models.SentEmailModel.create(
                self.new_user_id, self.NEW_USER_EMAIL,
                feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS,
                feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body',
                email_sent_time)

            # Check that the content of this email was recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 2)

            # pylint: disable=protected-access
            email_manager._send_email(
                self.new_user_id, feconf.SYSTEM_COMMITTER_ID,
                feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body',
                feconf.SYSTEM_EMAIL_ADDRESS)
            # pylint: enable=protected-access

            # Check that a new email was sent.
            messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
            self.assertEqual(1, len(messages))

            # Check that the content of this email was recorded in
            # SentEmailModel.
            all_models = email_models.SentEmailModel.get_all().fetch()
            self.assertEqual(len(all_models), 3)

            # Check that the contents of the model are correct.
            sent_email_model1 = all_models[0]
            sent_email_model2 = all_models[1]
            sent_email_model3 = all_models[2]

            self.assertEqual(
                sent_email_model1.email_hash, sent_email_model2.email_hash)
            self.assertEqual(
                sent_email_model1.email_hash, sent_email_model3.email_hash)
class FeedbackMessageBatchEmailTests(test_utils.GenericTestBase):
    def setUp(self):
        """Creates an editor owning one exploration and swap contexts
        that enable general and feedback-message email sending.
        """
        super(FeedbackMessageBatchEmailTests, self).setUp()

        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)

        self.exploration = self.save_new_default_exploration(
            'A', self.editor_id, 'Title')

        self.expected_email_subject = (
            'You\'ve received 1 new message on your explorations')

        # Context managers that switch the relevant email feature flags on.
        self.can_send_emails_ctx = self.swap(
            feconf, 'CAN_SEND_EMAILS', True)
        self.can_send_feedback_email_ctx = self.swap(
            feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
def test_correct_email_body_is_sent(self):
expected_email_html_body = (
'Hi editor,<br>'
'<br>'
'You\'ve received 1 new message on your Oppia explorations:<br>'
'<ul><li>Title: A message<br></li></ul>'
'You can view and reply to your messages from your '
'<a href="https://www.oppia.org/dashboard">dashboard</a>.'
'<br>'
'Thanks, and happy teaching!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body = (
'Hi editor,\n'
'\n'
'You\'ve received 1 new message on your Oppia explorations:\n'
'- Title: A message\n'
'You can view and reply to your messages from your dashboard.'
'\n'
'Thanks, and happy teaching!\n'
'\n'
'Best wishes,\n'
'The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
feedback_messages = {
self.exploration.id : {
'title': self.exploration.title,
'messages': ['A message']}
}
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
email_manager.send_feedback_message_email(
self.editor_id, feedback_messages)
# check that email body is correct.
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].html.decode(),
expected_email_html_body)
self.assertEqual(
messages[0].body.decode(),
expected_email_text_body)
# check that email model is correct.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.recipient_id, self.editor_id)
self.assertEqual(
sent_email_model.recipient_email, self.EDITOR_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION)
self.assertEqual(
sent_email_model.subject, self.expected_email_subject)
class SuggestionEmailTest(test_utils.GenericTestBase):
    """Tests for the email sent when a new suggestion is submitted."""

    def setUp(self):
        super(SuggestionEmailTest, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
        self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
        self.exploration = self.save_new_default_exploration(
            'A', self.editor_id, 'Title')
        self.recipient_list = [self.editor_id]
        # Context managers that enable email sending inside the test body.
        self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        self.can_send_feedback_email_ctx = self.swap(
            feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)

    def test_that_suggestion_emails_are_correct(self):
        """Verifies the suggestion email payloads and the stored model."""
        expected_email_subject = 'New suggestion for "Title"'
        expected_email_html_body = (
            'Hi editor,<br>'
            'newuser has submitted a new suggestion for your Oppia '
            'exploration, '
            '<a href="https://www.oppia.org/create/A">"Title"</a>.<br>'
            'You can accept or reject this suggestion by visiting the '
            '<a href="https://www.oppia.org/create/A#/feedback">'
            'feedback page</a> '
            'for your exploration.<br>'
            '<br>'
            'Thanks!<br>'
            '- The Oppia Team<br>'
            '<br>'
            'You can change your email preferences via the '
            '<a href="https://www.example.com">Preferences</a> page.')
        expected_email_text_body = (
            'Hi editor,\n'
            'newuser has submitted a new suggestion for your Oppia '
            'exploration, "Title".\n'
            'You can accept or reject this suggestion by visiting the '
            'feedback page for your exploration.\n'
            '\n'
            'Thanks!\n'
            '- The Oppia Team\n'
            '\n'
            'You can change your email preferences via the Preferences page.')
        with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
            email_manager.send_suggestion_email(
                self.exploration.title, self.exploration.id, self.new_user_id,
                self.recipient_list)

            # The editor should receive exactly one email with the expected
            # HTML and plain-text bodies.
            sent_messages = self.mail_stub.get_sent_messages(
                to=self.EDITOR_EMAIL)
            self.assertEqual(len(sent_messages), 1)
            self.assertEqual(
                sent_messages[0].html.decode(), expected_email_html_body)
            self.assertEqual(
                sent_messages[0].body.decode(), expected_email_text_body)

            # A matching SentEmailModel should have been recorded.
            model = email_models.SentEmailModel.get_all().fetch()[0]
            self.assertEqual(model.subject, expected_email_subject)
            self.assertEqual(model.recipient_id, self.editor_id)
            self.assertEqual(model.recipient_email, self.EDITOR_EMAIL)
            self.assertEqual(model.sender_id, feconf.SYSTEM_COMMITTER_ID)
            self.assertEqual(
                model.sender_email,
                'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
            self.assertEqual(
                model.intent, feconf.EMAIL_INTENT_SUGGESTION_NOTIFICATION)
class SubscriptionEmailTest(test_utils.GenericTestBase):
    """Tests for the email sent to subscribers on a new publication."""

    def setUp(self):
        super(SubscriptionEmailTest, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
        self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
        self.exploration = self.save_new_default_exploration(
            'A', self.editor_id, 'Title')
        # The new user subscribes to the editor, so publishing should
        # trigger a notification email to them.
        subscription_services.subscribe_to_creator(
            self.new_user_id, self.editor_id)
        self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        self.can_send_subscription_email_ctx = self.swap(
            feconf, 'CAN_SEND_SUBSCRIPTION_EMAILS', True)

    def test_that_subscription_emails_are_correct(self):
        """Verifies the subscription email payloads and the stored model."""
        expected_email_subject = 'editor has published a new exploration!'
        expected_email_html_body = (
            'Hi newuser,<br>'
            '<br>'
            'editor has published a new exploration! You can play it here: '
            '<a href="https://www.oppia.org/explore/A">Title</a><br>'
            '<br>'
            'Thanks, and happy learning!<br>'
            '<br>'
            'Best wishes,<br>'
            '- The Oppia Team<br>'
            '<br>'
            'You can change your email preferences via the '
            '<a href="https://www.example.com">Preferences</a> page.')
        expected_email_text_body = (
            'Hi newuser,\n'
            '\n'
            'editor has published a new exploration! You can play it here: '
            'Title\n'
            '\n'
            'Thanks, and happy learning!\n'
            '\n'
            'Best wishes,\n'
            '- The Oppia Team\n'
            '\n'
            'You can change your email preferences via the Preferences page.')
        with self.can_send_emails_ctx, self.can_send_subscription_email_ctx:
            email_manager.send_emails_to_subscribers(
                self.editor_id, self.exploration.id, self.exploration.title)

            # The subscriber should receive exactly one email with the
            # expected HTML and plain-text bodies.
            sent_messages = self.mail_stub.get_sent_messages(
                to=self.NEW_USER_EMAIL)
            self.assertEqual(len(sent_messages), 1)
            self.assertEqual(
                sent_messages[0].html.decode(), expected_email_html_body)
            self.assertEqual(
                sent_messages[0].body.decode(), expected_email_text_body)

            # A matching SentEmailModel should have been recorded.
            model = email_models.SentEmailModel.get_all().fetch()[0]
            self.assertEqual(model.subject, expected_email_subject)
            self.assertEqual(model.recipient_id, self.new_user_id)
            self.assertEqual(model.recipient_email, self.NEW_USER_EMAIL)
            self.assertEqual(model.sender_id, feconf.SYSTEM_COMMITTER_ID)
            self.assertEqual(
                model.sender_email,
                'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
            self.assertEqual(
                model.intent, feconf.EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION)
class FeedbackMessageInstantEmailTests(test_utils.GenericTestBase):
    """Tests for the instant feedback-message notification email."""

    def setUp(self):
        super(FeedbackMessageInstantEmailTests, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
        self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
        self.exploration = self.save_new_default_exploration(
            'A', self.editor_id, 'Title')
        self.recipient_list = [self.editor_id]
        self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        self.can_send_feedback_email_ctx = self.swap(
            feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)

    def test_that_feedback_message_emails_are_correct(self):
        """Verifies the instant feedback email payloads and stored model."""
        expected_email_subject = 'New Oppia message in "a subject"'
        expected_email_html_body = (
            'Hi newuser,<br><br>'
            'New update to thread "a subject" on '
            '<a href="https://www.oppia.org/create/A#/feedback">Title</a>:<br>'
            '<ul><li>editor: editor message<br></li></ul>'
            '(You received this message because you are a '
            'participant in this thread.)<br><br>'
            'Best wishes,<br>'
            'The Oppia team<br>'
            '<br>'
            'You can change your email preferences via the '
            '<a href="https://www.example.com">Preferences</a> page.')
        expected_email_text_body = (
            'Hi newuser,\n'
            '\n'
            'New update to thread "a subject" on Title:\n'
            '- editor: editor message\n'
            '(You received this message because you are a'
            ' participant in this thread.)\n'
            '\n'
            'Best wishes,\n'
            'The Oppia team\n'
            '\n'
            'You can change your email preferences via the Preferences page.')
        with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
            email_manager.send_instant_feedback_message_email(
                self.new_user_id, self.editor_id, 'editor message',
                'New Oppia message in "a subject"', self.exploration.title,
                self.exploration.id, 'a subject')

            # The recipient should receive exactly one email with the
            # expected HTML and plain-text bodies.
            sent_messages = self.mail_stub.get_sent_messages(
                to=self.NEW_USER_EMAIL)
            self.assertEqual(len(sent_messages), 1)
            self.assertEqual(
                sent_messages[0].html.decode(), expected_email_html_body)
            self.assertEqual(
                sent_messages[0].body.decode(), expected_email_text_body)

            # A matching SentEmailModel should have been recorded.
            model = email_models.SentEmailModel.get_all().fetch()[0]
            self.assertEqual(model.subject, expected_email_subject)
            self.assertEqual(model.recipient_id, self.new_user_id)
            self.assertEqual(model.recipient_email, self.NEW_USER_EMAIL)
            self.assertEqual(model.sender_id, feconf.SYSTEM_COMMITTER_ID)
            self.assertEqual(
                model.sender_email,
                'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
            self.assertEqual(
                model.intent,
                feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION)
class FlagExplorationEmailTest(test_utils.GenericTestBase):
    """Test that emails are sent to moderators when explorations are flagged."""

    def setUp(self):
        super(FlagExplorationEmailTest, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
        self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
        # Two moderators, so multi-recipient behaviour can be checked.
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        self.moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL)
        self.moderator2_email = 'moderator2@example.com'
        self.moderator2_username = 'moderator2'
        self.signup(self.moderator2_email, self.moderator2_username)
        self.moderator2_id = self.get_user_id_from_email(self.moderator2_email)
        self.set_moderators([self.moderator2_username, self.MODERATOR_USERNAME])
        self.exploration = self.save_new_default_exploration(
            'A', self.editor_id, 'Title')
        self.owner_ids = [self.editor_id]
        self.report_text = 'AD'
        self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)

    def test_that_flag_exploration_emails_are_correct(self):
        """Verifies the flag-report email sent to every moderator."""
        expected_email_subject = 'Exploration flagged by user: "Title"'
        expected_email_html_body = (
            'Hello Moderator,<br>'
            'newuser has flagged exploration "Title" on the following '
            'grounds: <br>'
            'AD .<br>'
            'You can modify the exploration by clicking '
            '<a href="https://www.oppia.org/create/A">'
            'here</a>.<br>'
            '<br>'
            'Thanks!<br>'
            '- The Oppia Team<br>'
            '<br>'
            'You can change your email preferences via the '
            '<a href="https://www.example.com">Preferences</a> page.')
        expected_email_text_body = (
            'Hello Moderator,\n'
            'newuser has flagged exploration "Title" on the following '
            'grounds: \n'
            'AD .\n'
            'You can modify the exploration by clicking here.\n'
            '\n'
            'Thanks!\n'
            '- The Oppia Team\n'
            '\n'
            'You can change your email preferences via the Preferences page.')
        with self.can_send_emails_ctx:
            email_manager.send_flag_exploration_email(
                self.exploration.title, self.exploration.id, self.new_user_id,
                self.report_text)

            # Every moderator should receive one copy of the same email.
            for moderator_email in (
                    self.MODERATOR_EMAIL, self.moderator2_email):
                sent_messages = self.mail_stub.get_sent_messages(
                    to=moderator_email)
                self.assertEqual(len(sent_messages), 1)
                self.assertEqual(
                    sent_messages[0].html.decode(), expected_email_html_body)
                self.assertEqual(
                    sent_messages[0].body.decode(), expected_email_text_body)

            # One SentEmailModel per moderator should have been stored.
            all_models = email_models.SentEmailModel.get_all().fetch()
            all_models.sort(key=lambda x: x.recipient_id)
            expected_recipients = [
                (self.moderator_id, self.MODERATOR_EMAIL),
                (self.moderator2_id, self.moderator2_email)]
            for model, (recipient_id, recipient_email) in zip(
                    all_models, expected_recipients):
                self.assertEqual(model.subject, expected_email_subject)
                self.assertEqual(model.recipient_id, recipient_id)
                self.assertEqual(model.recipient_email, recipient_email)
                self.assertEqual(model.sender_id, feconf.SYSTEM_COMMITTER_ID)
                self.assertEqual(
                    model.sender_email,
                    'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
                self.assertEqual(
                    model.intent, feconf.EMAIL_INTENT_REPORT_BAD_CONTENT)
class QueryStatusNotificationEmailTests(test_utils.GenericTestBase):
    """Tests that an email is sent to the submitter when a query has
    completed or failed.
    """

    SUBMITTER_USERNAME = 'submit'
    SUBMITTER_EMAIL = 'submit@example.com'

    def setUp(self):
        super(QueryStatusNotificationEmailTests, self).setUp()
        self.signup(self.SUBMITTER_EMAIL, self.SUBMITTER_USERNAME)
        self.submitter_id = self.get_user_id_from_email(self.SUBMITTER_EMAIL)
        self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)

    def _check_stored_email_model(self, expected_subject):
        """Asserts that the stored SentEmailModel describes a system email
        sent to the submitter with the given subject.
        """
        sent_email_model = email_models.SentEmailModel.get_all().fetch()[0]
        self.assertEqual(sent_email_model.subject, expected_subject)
        self.assertEqual(sent_email_model.recipient_id, self.submitter_id)
        self.assertEqual(
            sent_email_model.recipient_email, self.SUBMITTER_EMAIL)
        self.assertEqual(
            sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
        self.assertEqual(
            sent_email_model.sender_email,
            'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
        self.assertEqual(
            sent_email_model.intent,
            feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION)

    def test_that_correct_completion_email_is_sent(self):
        """Verifies the email sent when a query completes successfully."""
        query_id = 'qid'
        expected_email_subject = 'Query qid has successfully completed'
        # NOTE: 'succesfully' below reproduces the typo in the actual email
        # template output being tested.
        expected_email_html_body = (
            'Hi submit,<br>'
            'Your query with id qid has succesfully completed its '
            'execution. Visit the result page '
            '<a href="https://www.oppia.org/emaildashboardresult/qid">here</a> '
            'to see result of your query.<br><br>'
            'Thanks!<br>'
            '<br>'
            'Best wishes,<br>'
            'The Oppia Team<br>'
            '<br>'
            'You can change your email preferences via the '
            '<a href="https://www.example.com">Preferences</a> page.')
        expected_email_text_body = (
            'Hi submit,\n'
            'Your query with id qid has succesfully completed its '
            'execution. Visit the result page here '
            'to see result of your query.\n\n'
            'Thanks!\n'
            '\n'
            'Best wishes,\n'
            'The Oppia Team\n'
            '\n'
            'You can change your email preferences via the Preferences page.')
        with self.can_send_emails_ctx:
            email_manager.send_query_completion_email(
                self.submitter_id, query_id)

            # The submitter should receive exactly one email.
            sent_messages = self.mail_stub.get_sent_messages(
                to=self.SUBMITTER_EMAIL)
            self.assertEqual(len(sent_messages), 1)
            self.assertEqual(
                sent_messages[0].html.decode(), expected_email_html_body)
            self.assertEqual(
                sent_messages[0].body.decode(), expected_email_text_body)

            # A matching SentEmailModel should have been recorded.
            self._check_stored_email_model(expected_email_subject)

    def test_that_correct_failure_email_is_sent(self):
        """Verifies the submitter and admin emails on query failure."""
        query_id = 'qid'
        query_params = {
            'key1': 'val1',
            'key2': 'val2'
        }
        expected_email_subject = 'Query qid has failed'
        expected_email_html_body = (
            'Hi submit,<br>'
            'Your query with id qid has failed due to error '
            'during execution. '
            'Please check the query parameters and submit query again.<br><br>'
            'Thanks!<br>'
            '<br>'
            'Best wishes,<br>'
            'The Oppia Team<br>'
            '<br>'
            'You can change your email preferences via the '
            '<a href="https://www.example.com">Preferences</a> page.')
        expected_email_text_body = (
            'Hi submit,\n'
            'Your query with id qid has failed due to error '
            'during execution. '
            'Please check the query parameters and submit query again.\n\n'
            'Thanks!\n'
            '\n'
            'Best wishes,\n'
            'The Oppia Team\n'
            '\n'
            'You can change your email preferences via the Preferences page.')
        expected_admin_email_text_body = (
            '(Sent from testbed-test)\n\n'
            'Query job with qid query id has failed in its execution.\n'
            'Query parameters:\n\n'
            'key1: val1\n'
            'key2: val2\n')
        with self.can_send_emails_ctx:
            email_manager.send_query_failure_email(
                self.submitter_id, query_id, query_params)

            # The submitter should receive exactly one failure email.
            sent_messages = self.mail_stub.get_sent_messages(
                to=self.SUBMITTER_EMAIL)
            self.assertEqual(len(sent_messages), 1)
            self.assertEqual(
                sent_messages[0].html.decode(), expected_email_html_body)
            self.assertEqual(
                sent_messages[0].body.decode(), expected_email_text_body)

            # A matching SentEmailModel should have been recorded.
            self._check_stored_email_model(expected_email_subject)

            # The admin should also be notified of the failure.
            admin_messages = self.mail_stub.get_sent_messages(
                to=feconf.ADMIN_EMAIL_ADDRESS)
            self.assertEqual(len(admin_messages), 1)
            self.assertEqual(
                admin_messages[0].body.decode(),
                expected_admin_email_text_body)
class BulkEmailsTests(test_utils.GenericTestBase):
    """Tests for sending bulk emails to multiple recipients."""

    SENDER_EMAIL = 'sender@example.com'
    SENDER_USERNAME = 'sender'
    FAKE_SENDER_EMAIL = 'fake@example.com'
    FAKE_SENDER_USERNAME = 'fake'
    RECIPIENT_A_EMAIL = 'a@example.com'
    RECIPIENT_A_USERNAME = 'usera'
    RECIPIENT_B_EMAIL = 'b@example.com'
    RECIPIENT_B_USERNAME = 'userb'

    def setUp(self):
        super(BulkEmailsTests, self).setUp()
        # SENDER is an authorised sender, FAKE_SENDER is unauthorised, and
        # A and B are the recipients.
        self.signup(self.SENDER_EMAIL, self.SENDER_USERNAME)
        self.sender_id = self.get_user_id_from_email(self.SENDER_EMAIL)
        self.signup(self.FAKE_SENDER_EMAIL, self.FAKE_SENDER_USERNAME)
        self.fake_sender_id = self.get_user_id_from_email(
            self.FAKE_SENDER_EMAIL)
        self.signup(self.RECIPIENT_A_EMAIL, self.RECIPIENT_A_USERNAME)
        self.signup(self.RECIPIENT_B_EMAIL, self.RECIPIENT_B_USERNAME)
        self.recipient_a_id = self.get_user_id_from_email(
            self.RECIPIENT_A_EMAIL)
        self.recipient_b_id = self.get_user_id_from_email(
            self.RECIPIENT_B_EMAIL)
        self.recipient_ids = [self.recipient_a_id, self.recipient_b_id]
        # Whitelist SENDER as a permitted bulk-email sender.
        config_services.set_property(
            self.sender_id, 'whitelisted_email_senders',
            [self.SENDER_USERNAME])
        self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)

    def test_that_correct_email_is_sent(self):
        """Verifies payloads and the BulkEmailModel for a valid sender."""
        email_subject = 'Dummy subject'
        email_html_body = 'Dummy email body.<br>'
        email_text_body = 'Dummy email body.\n'
        with self.can_send_emails_ctx:
            # pylint: disable=protected-access
            email_manager._send_bulk_mail(
                self.recipient_ids, self.sender_id,
                feconf.BULK_EMAIL_INTENT_MARKETING, email_subject,
                email_html_body, self.SENDER_EMAIL, self.SENDER_USERNAME)
            # pylint: enable=protected-access

        # Each recipient should receive one copy of the same email.
        for recipient_email in (
                self.RECIPIENT_A_EMAIL, self.RECIPIENT_B_EMAIL):
            recipient_messages = self.mail_stub.get_sent_messages(
                to=recipient_email)
            self.assertEqual(len(recipient_messages), 1)
            self.assertEqual(
                recipient_messages[0].html.decode(), email_html_body)
            self.assertEqual(
                recipient_messages[0].body.decode(), email_text_body)

        # Exactly one BulkEmailModel should have been stored.
        all_models = email_models.BulkEmailModel.get_all().fetch()
        self.assertEqual(len(all_models), 1)
        sent_email_model = all_models[0]
        self.assertEqual(sent_email_model.subject, email_subject)
        self.assertEqual(sent_email_model.html_body, email_html_body)
        self.assertEqual(sent_email_model.recipient_ids, self.recipient_ids)
        self.assertEqual(sent_email_model.sender_id, self.sender_id)
        self.assertEqual(
            sent_email_model.sender_email,
            '%s <%s>' % (self.SENDER_USERNAME, self.SENDER_EMAIL))
        self.assertEqual(
            sent_email_model.intent, feconf.BULK_EMAIL_INTENT_MARKETING)

    def test_that_exception_is_raised_for_unauthorised_sender(self):
        """An unauthorised sender must raise, and nothing may be sent."""
        with self.can_send_emails_ctx, self.assertRaisesRegexp(
            Exception, 'Invalid sender_id for email'):
            # pylint: disable=protected-access
            email_manager._send_bulk_mail(
                self.recipient_ids, self.fake_sender_id,
                feconf.BULK_EMAIL_INTENT_MARKETING, 'email_subject',
                'email_html_body', self.FAKE_SENDER_EMAIL,
                self.FAKE_SENDER_USERNAME)
            # pylint: enable=protected-access

        # No emails should have been sent and no model stored.
        for recipient_email in (
                self.RECIPIENT_A_EMAIL, self.RECIPIENT_B_EMAIL):
            self.assertEqual(
                len(self.mail_stub.get_sent_messages(to=recipient_email)), 0)
        self.assertEqual(
            len(email_models.BulkEmailModel.get_all().fetch()), 0)
class EmailPreferencesTests(test_utils.GenericTestBase):
    """Tests for computing whether users can receive thread emails."""

    def test_can_users_receive_thread_email(self):
        """Checks per-exploration mute flags against the global setting.

        Note: several of these checks were previously written as
        self.assertTrue(<actual_list>, <expected_list>), where the second
        argument is the failure *message*, so the assertion passed for any
        non-empty list. They are now real list-equality assertions.
        """
        user_ids = ('someUser1', 'someUser2')
        exp_id = 'someExploration'
        usernames = ('username1', 'username2')
        emails = ('user1@example.com', 'user2@example.com')
        for user_id, username, user_email in zip(user_ids, usernames, emails):
            user_services.get_or_create_user(user_id, user_email)
            user_services.set_username(user_id, username)
        # Both users can receive all emails in the default setting.
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, True), [True, True])
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, False), [True, True])
        # The first user has muted feedback notifications for this
        # exploration, so they should receive only suggestion emails.
        user_services.set_email_preferences_for_exploration(
            user_ids[0], exp_id, mute_feedback_notifications=True)
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, True), [True, True])
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, False), [False, True])
        # The second user has muted suggestion notifications for this
        # exploration, so they should receive only feedback emails.
        user_services.set_email_preferences_for_exploration(
            user_ids[1], exp_id, mute_suggestion_notifications=True)
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, True), [True, False])
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, False), [False, True])
        # Both users have disabled all emails globally, so they should not
        # receive any emails.
        for user_id in user_ids:
            user_services.update_email_preferences(
                user_id, True, True, False, True)
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, True), [False, False])
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, False), [False, False])
        # Both users have unmuted feedback/suggestion emails for this
        # exploration, but all emails are still disabled globally, so they
        # should not receive any emails. (Previously only the last user was
        # updated here, via a leftover loop variable.)
        user_services.set_email_preferences_for_exploration(
            user_ids[0], exp_id, mute_feedback_notifications=False)
        user_services.set_email_preferences_for_exploration(
            user_ids[1], exp_id, mute_suggestion_notifications=False)
        for user_id in user_ids:
            user_services.update_email_preferences(
                user_id, True, True, False, True)
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, True), [False, False])
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, False), [False, False])
        # Both users have re-enabled all emails globally, so they should
        # receive all emails.
        for user_id in user_ids:
            user_services.update_email_preferences(
                user_id, True, True, True, True)
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, True), [True, True])
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, False), [True, True])
| michaelWagner/oppia | core/domain/email_manager_test.py | Python | apache-2.0 | 78,438 | [
"VisIt"
] | 55edb75e142408aaaeb2051bb7e471bae7774ef3d7d9730cc8671951c1bfa17b |
import wx
import wx.grid
import wx.lib.scrolledpanel
import os
import os.path
# BUGFIX: shutil was used in the fallback path below but never imported;
# the resulting NameError was silently swallowed by the bare except, so the
# broken molfile2params install was never actually removed.
import shutil
import sys
import platform
import glob
import webbrowser
try:
    # molfile2params may have been installed locally in the user's sandbox
    # directory; make it importable from there.
    if (platform.system() == "Windows"):
        sys.path.append(os.path.expanduser("~") + "\\InteractiveROSETTA\\molfile2params")
    else:
        sys.path.append(os.path.expanduser("~") + "/.InteractiveROSETTA/molfile2params")
    import molfile_to_params
except:
    # Import failed: remove the (presumably broken) local install so the
    # next launch can reinstall it automatically, then tell the user.
    try:
        if (platform.system() == "Windows"):
            shutil.rmtree(os.path.expanduser("~") + "\\InteractiveROSETTA\\molfile2params")
        else:
            shutil.rmtree(os.path.expanduser("~") + "/.InteractiveROSETTA/molfile2params")
    except:
        pass
    dlg = wx.MessageDialog(None, "InteractiveROSETTA was unable to import molfile2params!\nInteractiveROSETTA attempted to remove the previous molfile2params install. Run InteractiveROSETTA again to reinstall it automatically. If it still does not import, please contact the developer.", "Molfile2Params Missing", wx.OK | wx.ICON_ERROR | wx.CENTRE)
    dlg.ShowModal()
    dlg.Destroy()
    exit()
from tools import *
class ResidueCreatorPanel(wx.lib.scrolledpanel.ScrolledPanel):
    """Scrolled tool panel for parameterizing ligands/NCAAs from .mol2 files.

    Workflow visible in this class: the user loads a .mol2 structure
    (loadMOL2), runs molfile2params on it (createParams), reviews/edits the
    assigned Rosetta atom types in a grid (atomMenuSelect/typeMenuSelect),
    and saves the resulting params files into the sandbox "params" database
    (addToDB) or removes old entries (removeParams).
    """
    def __init__(self, parent, W, H):
        # W and H are the dimensions of the main window; the panel height is
        # derived from H so the panel fills the area below the toolbar
        #if (platform.system() == "Windows"):
        wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent, id=-1, pos=(10, 60), size=(340, H-330), name="ProtSuperimposition")
        winh = H-330
        #else:
        #wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent, id=-1, pos=(10, 60), size=(340, H-290), name="ProtSuperimposition")
        #winh = H-290
        self.SetBackgroundColour("#333333")
        self.parent = parent
        self.sizer = wx.GridBagSizer(5, 5)
        #self.sb = wx.ScrollBar(self)
        #self.sb.SetScrollRange(10)
        #print self.sb.GetScrollRange(0)
        # --- Panel title.  OSX uses pre-rendered label bitmaps; Linux labels
        # are re-centered with resizeTextControlForUNIX (from tools)
        if (platform.system() == "Windows"):
            self.lblProt = wx.StaticText(self, -1, "Residue/Ligand Creator", (25, 15), (270, 25), style=wx.ALIGN_CENTRE)
            self.lblProt.SetFont(wx.Font(12, wx.DEFAULT, wx.ITALIC, wx.BOLD))
        elif (platform.system() == "Darwin"):
            self.lblProt = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/advresiduecreator/lblLigand.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(25, 15), size=(270, 25))
        else:
            self.lblProt = wx.StaticText(self, -1, "Residue/Ligand Creator", pos=(60, 15), style=wx.ALIGN_CENTRE)
            self.lblProt.SetFont(wx.Font(12, wx.DEFAULT, wx.ITALIC, wx.BOLD))
            resizeTextControlForUNIX(self.lblProt, 0, self.GetSize()[0])
        self.lblProt.SetForegroundColour("#FFFFFF")
        # --- Help button (top-right corner)
        if (platform.system() == "Darwin"):
            self.HelpBtn = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/HelpBtn.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(295, 10), size=(25, 25))
        else:
            self.HelpBtn = wx.Button(self, id=-1, label="?", pos=(295, 10), size=(25, 25))
            self.HelpBtn.SetForegroundColour("#0000FF")
            self.HelpBtn.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        self.HelpBtn.Bind(wx.EVT_BUTTON, self.showHelp)
        self.HelpBtn.SetToolTipString("Display the help file for this window")
        # --- Instruction text
        if (platform.system() == "Windows"):
            self.lblInst = wx.StaticText(self, -1, "Upload a PDB file containing unrecognized atoms.\nThen select unrecognized residue types to parameterize.", (0, 45), (320, 25), wx.ALIGN_CENTRE)
            self.lblInst.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
        elif (platform.system() == "Darwin"):
            self.lblInst = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/advresiduecreator/lblInstLigand.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(0, 45), size=(320, 50))
        else:
            self.lblInst = wx.StaticText(self, -1, "Upload a PDB file containing unrecognized atoms.\nThen select unrecognized residue types to parameterize.", pos=(0, 45), style=wx.ALIGN_CENTRE)
            self.lblInst.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
            resizeTextControlForUNIX(self.lblInst, 0, self.GetSize()[0])
        self.lblInst.SetForegroundColour("#FFFFFF")
        # --- Name of the currently loaded .mol2 file plus the Load button
        self.lblMOL2 = wx.StaticText(self, -1, "None Uploaded", pos=(10, 103), size=(180, 25), style=wx.ALIGN_CENTRE)
        self.lblMOL2.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        if (platform.system() == "Linux"):
            resizeTextControlForUNIX(self.lblMOL2, 10, 180)
        self.lblMOL2.SetForegroundColour("#FFFFFF")
        if (platform.system() == "Darwin"):
            self.btnLoad = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/advresiduecreator/btnLoadMOL2.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(200, 100), size=(110, 25))
        else:
            self.btnLoad = wx.Button(self, id=-1, label="Load .MOL2", pos=(200, 100), size=(110, 25))
            self.btnLoad.SetForegroundColour("#000000")
            self.btnLoad.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        self.btnLoad.Bind(wx.EVT_BUTTON, self.loadMOL2)
        self.btnLoad.SetToolTipString("Load a .mol2 file containing a ligand/NCAA")
        # --- Ligand/Polymer mode toggle (see typeToggle)
        if (platform.system() == "Darwin"):
            self.btnType = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/advresiduecreator/btnType_Ligand.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(40, 140), size=(100, 25))
        else:
            self.btnType = wx.Button(self, id=-1, label="Ligand", pos=(40, 140), size=(100, 25))
            self.btnType.SetForegroundColour("#000000")
            self.btnType.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        self.btnType.Bind(wx.EVT_BUTTON, self.typeToggle)
        self.btnType.SetToolTipString("Uploaded .mol2 file represents a ligand")
        # --- Create button; disabled until a .mol2 file is loaded
        if (platform.system() == "Darwin"):
            self.btnCreate = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/advresiduecreator/btnCreate.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(180, 140), size=(100, 25))
        else:
            self.btnCreate = wx.Button(self, id=-1, label="Create!", pos=(180, 140), size=(100, 25))
            self.btnCreate.SetForegroundColour("#000000")
            self.btnCreate.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        self.btnCreate.Bind(wx.EVT_BUTTON, self.createParams)
        self.btnCreate.Disable()
        self.btnCreate.SetToolTipString("Parameterize the uploaded .mol2 structure")
        # --- Grid listing atom name -> assigned Rosetta type (read-only rows;
        # edits go through the combo boxes below)
        self.paramsType = "Ligand"
        self.paramsAtoms = []
        self.grdParamsAtoms = wx.grid.Grid(self)
        self.grdParamsAtoms.CreateGrid(0, 1)
        if (winh-265 > 200):
            self.grdParamsAtoms.SetSize((320, winh-265))
        else:
            self.grdParamsAtoms.SetSize((320, 200))
        self.grdParamsAtoms.SetPosition((0, 175))
        self.grdParamsAtoms.SetLabelFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        self.grdParamsAtoms.DisableDragColSize()
        self.grdParamsAtoms.DisableDragRowSize()
        self.grdParamsAtoms.SetColLabelValue(0, "Assigned Type")
        self.grdParamsAtoms.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK, self.gridClick)
        # Remaining controls are laid out below the grid
        ypos = self.grdParamsAtoms.GetPosition()[1] + self.grdParamsAtoms.GetSize()[1] + 10
        # --- Atom selector and type selector combo boxes
        if (platform.system() == "Darwin"):
            self.atomMenu = wx.ComboBox(self, pos=(10, ypos), size=(140, 25), choices=[], style=wx.CB_READONLY)
        else:
            self.atomMenu = wx.ComboBox(self, pos=(10, ypos), size=(140, 25), choices=[], style=wx.CB_READONLY | wx.CB_SORT)
        self.atomMenu.Bind(wx.EVT_COMBOBOX, self.atomMenuSelect)
        self.atomMenu.SetToolTipString("Select .mol2 atoms for editing")
        if (platform.system() == "Darwin"):
            self.typeMenu = wx.ComboBox(self, pos=(170, ypos), size=(140, 25), choices=[], style=wx.CB_READONLY)
        else:
            self.typeMenu = wx.ComboBox(self, pos=(170, ypos), size=(140, 25), choices=[], style=wx.CB_READONLY | wx.CB_SORT)
        self.typeMenu.Bind(wx.EVT_COMBOBOX, self.typeMenuSelect)
        self.typeMenu.SetToolTipString("Change the parameterization type for the selected atom")
        # Useful dictionary of atom elements mapped to available types
        self.atomtypes = {}
        self.atomtypes["H"] = ["Hpol", "Haro", "Hapo"]
        self.atomtypes["C"] = ["CH3", "CH2", "CH1", "aroC", "CNH2", "COO", "CAbb", "CObb"]
        self.atomtypes["N"] = ["Nlys", "NH2O", "Ntrp", "Nhis", "Npro", "Nbb"]
        self.atomtypes["O"] = ["OH", "Oaro", "OOC", "OCbb", "ONH2"]
        self.atomtypes["S"] = ["S"]
        self.atomtypes["P"] = ["Phos"]
        self.atomtypes["F"] = ["F"]
        self.atomtypes["CL"] = ["Cl"]
        self.atomtypes["BR"] = ["Br"]
        self.atomtypes["I"] = ["I"]
        self.atomtypes["NA"] = ["Na1p"]
        self.atomtypes["K"] = ["K1p"]
        self.atomtypes["MG"] = ["Mg2p"]
        self.atomtypes["FE"] = ["Fe3p"]
        self.atomtypes["CA"] = ["Ca2p"]
        self.atomtypes["ZN"] = ["Zn2p"]
        # --- N-/C-terminus selectors; only enabled in "Polymer" mode
        if (platform.system() == "Darwin"):
            self.lblNterm = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/advresiduecreator/lblNterm.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, ypos+35), size=(40, 25))
        else:
            self.lblNterm = wx.StaticText(self, -1, "Nterm:", pos=(10, ypos+35), size=(40, 25))
            self.lblNterm.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
        self.lblNterm.SetForegroundColour("#FFFFFF")
        self.NtermMenu = wx.ComboBox(self, pos=(60, ypos+30), size=(90, 25), choices=[], style=wx.CB_READONLY)
        self.NtermMenu.Bind(wx.EVT_COMBOBOX, self.atomMenuSelect)
        self.NtermMenu.SetToolTipString("N-terminus atom for NCAAs")
        if (platform.system() == "Darwin"):
            self.lblCterm = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/advresiduecreator/lblCterm.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(170, ypos+35), size=(40, 25))
        else:
            self.lblCterm = wx.StaticText(self, -1, "Cterm:", pos=(170, ypos+35), size=(40, 25))
            self.lblCterm.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
        self.lblCterm.SetForegroundColour("#FFFFFF")
        if (platform.system() == "Darwin"):
            self.CtermMenu = wx.ComboBox(self, pos=(220, ypos+30), size=(90, 25), choices=[], style=wx.CB_READONLY)
        else:
            self.CtermMenu = wx.ComboBox(self, pos=(220, ypos+30), size=(90, 25), choices=[], style=wx.CB_READONLY | wx.CB_SORT)
        self.CtermMenu.Bind(wx.EVT_COMBOBOX, self.typeMenuSelect)
        self.CtermMenu.SetToolTipString("C-terminus atom for NCAAs")
        self.NtermMenu.Disable()
        self.CtermMenu.Disable()
        # --- 3-letter code entry and the "Add to Database" button
        if (platform.system() == "Darwin"):
            self.lblCode = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/advresiduecreator/lblCode.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, ypos+65), size=(40, 25))
        else:
            self.lblCode = wx.StaticText(self, -1, "Code:", pos=(10, ypos+65), size=(40, 25))
            self.lblCode.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
        self.lblCode.SetForegroundColour("#FFFFFF")
        self.txtCode = wx.TextCtrl(self, -1, pos=(50, ypos+60), size=(40, 25))
        self.txtCode.SetValue("UNK")
        self.txtCode.SetToolTipString("Three-letter amino acid code for the ligand/NCAA")
        if (platform.system() == "Darwin"):
            self.btnAdd = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/advresiduecreator/btnAddDB.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(110, ypos+60), size=(200, 25))
        else:
            self.btnAdd = wx.Button(self, id=-1, label="Add to Database", pos=(110, ypos+60), size=(200, 25))
            self.btnAdd.SetForegroundColour("#000000")
            self.btnAdd.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
        self.btnAdd.Bind(wx.EVT_BUTTON, self.addToDB)
        self.btnAdd.Disable()
        self.btnAdd.SetToolTipString("Add the ligand/NCAA to the Rosetta database with the selected parameters")
        # --- Visual separator line
        if (platform.system() == "Windows"):
            self.lblLine = wx.StaticText(self, -1, "==========================", (0, ypos+90), (320, 20), wx.ALIGN_CENTRE)
            self.lblLine.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
        elif (platform.system() == "Darwin"):
            self.lblLine = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/advresiduecreator/lblLine.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(0, ypos+90), size=(320, 20))
        else:
            self.lblLine = wx.StaticText(self, -1, "==========================", (0, ypos+90), style=wx.ALIGN_CENTRE)
            self.lblLine.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
            resizeTextControlForUNIX(self.lblLine, 20, 120)
        self.lblLine.SetForegroundColour("#FFFFFF")
        # --- Removal controls for entries already in the database
        self.removeMenu = wx.ComboBox(self, pos=(10, ypos+110), size=(90, 25), choices=[], style=wx.CB_READONLY)
        self.removeMenu.Bind(wx.EVT_COMBOBOX, self.resMenuSelect)
        self.removeMenu.SetToolTipString("Select residues already parameterized for removal")
        if (platform.system() == "Darwin"):
            self.btnRemove = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/advresiduecreator/btnRemoveDB.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(110, ypos+110), size=(200, 25))
        else:
            self.btnRemove = wx.Button(self, id=-1, label="Remove from DB", pos=(110, ypos+110), size=(200, 25))
            self.btnRemove.SetForegroundColour("#000000")
            self.btnRemove.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        self.btnRemove.Bind(wx.EVT_BUTTON, self.removeParams)
        # NOTE(review): this tooltip looks copy-pasted from btnType; it
        # probably should describe removal instead — confirm with the author
        self.btnRemove.SetToolTipString("Uploaded .mol2 file represents a ligand")
        # --- Populate the removal menu from the sandbox params database;
        # entries are stored as CODE.fa.params (HOH is built-in, skip it)
        goToSandbox("params")
        paramsfiles = glob.glob("*.fa.params")
        paramslist = []
        for param in paramsfiles:
            if (param != "HOH.fa.params"):
                paramslist.append(param.split(".fa.params")[0])
        self.removeMenu.AppendItems(paramslist)
        #self.SetSizerAndFit(self.sizer)
        #self.SetupScrolling()
        # --- Scrolling setup: virtual height runs to just below btnRemove
        scrollh = self.btnRemove.GetPosition()[1] + self.btnRemove.GetSize()[1] + 5
        self.SetScrollbars(1, 1, 320, scrollh)
        self.grdParamsAtoms.SetColSize(0, int(self.grdParamsAtoms.GetSize()[0] / 2))
        self.grdParamsAtoms.SetRowLabelSize(int(self.grdParamsAtoms.GetSize()[0] / 2))
        # Remembered scroll offset, maintained by scrolled()/activate()
        self.winscrollpos = 0
        self.Bind(wx.EVT_SCROLLWIN, self.scrolled)
def showHelp(self, event):
# Open the help page
if (platform.system() == "Darwin"):
try:
browser = webbrowser.get("Safari")
except:
print "Could not load Safari! The help files are located at " + self.scriptdir + "/help"
return
browser.open(self.parent.parent.scriptdir + "/help/ligand.html")
else:
webbrowser.open(self.parent.parent.scriptdir + "/help/ligand.html")
    def setSeqWin(self, seqWin):
        # Store a reference to the sequence window; loadMOL2 reads and updates
        # its "cwd" so file dialogs share a current working directory
        self.seqWin = seqWin
def setPyMOL(self, pymol):
self.pymol = pymol
self.cmd = pymol.cmd
self.stored = pymol.stored
def gridClick(self, event):
self.atomMenu.SetSelection(event.GetRow())
self.atomMenuSelect(event)
event.Skip()
    def scrolled(self, event):
        # Remember the vertical scroll offset so it can be restored the next
        # time this panel becomes active (see activate)
        self.winscrollpos = self.GetScrollPos(wx.VERTICAL)
        event.Skip()
    def activate(self):
        # Restore the vertical scroll position saved by scrolled()
        self.Scroll(0, self.winscrollpos)
    def loadMOL2(self, event):
        """Prompt for a .mol2 file, load it into PyMOL as "params", and enable Create."""
        # Get the structure from a MOL2 file and load it into PyMOL
        logInfo("Load MOL2 button clicked")
        dlg = wx.FileDialog(
            self, message="Choose a File",
            defaultDir=self.seqWin.cwd,
            defaultFile="",
            wildcard="MOL2 Files (*.mol2)|*.mol2",
            style=wx.OPEN | wx.CHANGE_DIR)
        if (dlg.ShowModal() == wx.ID_OK):
            paths = dlg.GetPaths()
            # Change cwd to the last opened file
            if (platform.system() == "Windows"):
                lastDirIndx = paths[len(paths)-1].rfind("\\")
            else:
                lastDirIndx = paths[len(paths)-1].rfind("/")
            self.seqWin.cwd = str(paths[len(paths)-1][0:lastDirIndx])
            filename = str(paths[0])
            # createParams later reads self.loadedfile
            self.loadedfile = filename
            localfilename = filename[lastDirIndx+1:]
            # Delete a file if we're loading a new one (ignore errors if no
            # previous "params" object exists in PyMOL)
            try:
                self.cmd.remove("params")
                self.cmd.delete("params")
            except:
                pass
            try:
                self.cmd.load(filename, "params")
            except:
                wx.MessageBox("The file " + filename + " could not be read!", "File Cannot Be Read", wx.OK|wx.ICON_EXCLAMATION)
                return
            logInfo("MOL2 file loaded", filename)
            # Display the structure as gray-carbon sticks, metals as spheres
            self.cmd.select("paramssele", "model params")
            self.cmd.hide("everything", "paramssele")
            self.cmd.show("sticks", "paramssele")
            self.cmd.color("gray", "paramssele and symbol c")
            self.cmd.show("spheres", "metal and model params")
            self.btnCreate.Enable()
            self.lblMOL2.SetLabel(localfilename)
            self.lblMOL2.SetForegroundColour("#FFFFFF")
            if (platform.system() == "Linux"):
                resizeTextControlForUNIX(self.lblMOL2, 10, 180)
        else:
            logInfo("Load MOL2 operation cancelled")
def typeToggle(self, event):
if (self.paramsType == "Ligand"):
self.paramsType = "Polymer"
self.NtermMenu.Enable()
self.CtermMenu.Enable()
if (platform.system() == "Darwin"):
self.btnType.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/advresiduecreator/btnType_Polymer.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnType.SetLabel(self.paramsType)
self.btnType.SetToolTipString("Uploaded .mol2 file represents an NCAA embedded as part of a polypeptide sequence")
else:
self.paramsType = "Ligand"
self.NtermMenu.Disable()
self.CtermMenu.Disable()
if (platform.system() == "Darwin"):
self.btnType.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/advresiduecreator/btnType_Ligand.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnType.SetLabel(self.paramsType)
self.btnType.SetToolTipString("Uploaded .mol2 file represents a ligand")
logInfo("Params type changed to " + self.paramsType)
def createParams(self, event):
# Change to the sandbox location
goToSandbox()
# Attempt to generate the params file
try:
if (os.path.isfile("LG.params")):
os.remove("LG.params")
if (os.path.isfile("LG.fa.params")):
os.remove("LG.fa.params")
if (os.path.isfile("LG.cen.params")):
os.remove("LG.cen.params")
molfile_to_params.main([self.loadedfile, "--no-pdb", "--keep-names", "-c"])
except:
wx.MessageBox("The file " + self.loadedfile + " could not be converted to a params file!", "File Cannot Be Processed", wx.OK|wx.ICON_EXCLAMATION)
return
logInfo("Params file created successfully")
# Now read the LG.params file and grab out the atom names and their assigned types
# so the user can see them and modify them if desired
f = open("LG.fa.params", "r")
if (self.grdParamsAtoms.NumberRows > 0):
self.grdParamsAtoms.DeleteRows(0, self.grdParamsAtoms.NumberRows)
self.atomnames = []
atomtypes = []
for aline in f:
if (aline[0:4] == "ATOM"):
atomname = aline.split()[1]
atomtype = aline.split()[2]
self.atomnames.append(atomname)
atomtypes.append(atomtype)
f.close()
# Sort the atomnames to make it easier for the user to find things
for i in range(0, len(self.atomnames)-1):
lowest = i
for j in range(i+1, len(self.atomnames)):
if (self.atomnames[j] < self.atomnames[lowest]):
lowest = j
temp = self.atomnames[i]
self.atomnames[i] = self.atomnames[lowest]
self.atomnames[lowest] = temp
temp = atomtypes[i]
atomtypes[i] = atomtypes[lowest]
atomtypes[lowest] = temp
# Now add things to the grid
for i in range(0, len(self.atomnames)):
self.grdParamsAtoms.AppendRows(1)
self.grdParamsAtoms.SetRowLabelValue(i, self.atomnames[i])
self.grdParamsAtoms.SetCellValue(i, 0, atomtypes[i])
self.grdParamsAtoms.SetCellAlignment(i, 0, wx.ALIGN_CENTRE, wx.ALIGN_CENTRE)
readOnly = wx.grid.GridCellAttr()
readOnly.SetReadOnly(True)
self.grdParamsAtoms.SetRowAttr(i, readOnly)
# Update some of the atom selection menus with the list of atomnames
self.atomMenu.Clear()
self.atomMenu.AppendItems(self.atomnames)
self.NtermMenu.Clear()
self.NtermMenu.AppendItems(self.atomnames)
self.CtermMenu.Clear()
self.CtermMenu.AppendItems(self.atomnames)
self.btnAdd.Enable()
    def atomMenuSelect(self, event):
        """Highlight the chosen atom (grid row + PyMOL sphere) and offer its valid types."""
        # PyMOL should make the selected atom stand out, so it will generate a small sphere
        # for the selected atom
        logInfo("Atom " + self.atomMenu.GetStringSelection() + " selected")
        # Set the selected residue's row to red so it is easy to see what the selection is
        for r in range(0, self.grdParamsAtoms.NumberRows):
            if (r == self.atomMenu.GetSelection()):
                for c in range(0, self.grdParamsAtoms.NumberCols):
                    self.grdParamsAtoms.SetCellBackgroundColour(r, c, "light blue")
            else:
                for c in range(0, self.grdParamsAtoms.NumberCols):
                    self.grdParamsAtoms.SetCellBackgroundColour(r, c, "white")
        self.grdParamsAtoms.Refresh()
        # Clear any previous sphere highlight on the whole structure
        self.cmd.select("paramssele", "model params")
        self.cmd.hide("spheres", "paramssele")
        # Broad except: PyMOL selection/iteration can fail if the structure or
        # selection is missing, in which case the highlight is simply skipped
        try:
            self.cmd.select("paramssele", "model params and name " + self.atomMenu.GetStringSelection())
            self.cmd.show("spheres", "paramssele")
            self.cmd.set("sphere_scale", 0.3, "paramssele")
            self.cmd.select("sele", "paramssele")
            self.cmd.enable("sele")
            self.cmd.delete("paramssele")
            # Get the element symbol from PyMOL
            self.pymol.stored.element = ""
            self.cmd.iterate_state(1, "sele", "stored.element = elem")
            # Put the available choices into the typeMenu, restricted to the
            # Rosetta types valid for this element (see self.atomtypes)
            self.typeMenu.Clear()
            self.typeMenu.AppendItems(self.atomtypes[self.pymol.stored.element.upper()])
            # Preselect the type currently assigned in the grid
            for i in range(0, self.grdParamsAtoms.NumberRows):
                if (self.atomMenu.GetStringSelection().strip() == self.grdParamsAtoms.GetRowLabelValue(i).strip()):
                    currentType = self.grdParamsAtoms.GetCellValue(i, 0).strip()
                    break
            indx = self.typeMenu.GetItems().index(currentType)
            self.typeMenu.SetSelection(indx)
        except:
            pass
def typeMenuSelect(self, event):
# Replace the type in the grid with the selected type
atomname = self.atomMenu.GetStringSelection()
atomtype = self.typeMenu.GetStringSelection()
logInfo("Atom type " + atomtype + " selected")
for r in range(0, self.grdParamsAtoms.NumberRows):
if (atomname.strip() == self.grdParamsAtoms.GetRowLabelValue(r).strip()):
self.grdParamsAtoms.SetCellValue(r, 0, atomtype)
break
def addToDB(self, event):
# First check to make sure that a valid code is given
code = self.txtCode.GetValue().strip().upper()
if (len(code) > 3):
wx.MessageBox("You have not entered a valid 3-letter code. Please enter a valid 3-letter code.", "Bad Code", wx.OK|wx.ICON_EXCLAMATION)
return
# If this is a polymer, make sure both an N and C termini are specified
if (self.paramsType == "Polymer"):
Nterm = self.NtermMenu.GetStringSelection().strip()
Cterm = self.CtermMenu.GetStringSelection().strip()
if (len(Nterm) == 0 or len(Cterm) == 0):
wx.MessageBox("Please choose an N and C terminus atom for your polymer residue.", "Termini Not Specified", wx.OK|wx.ICON_EXCLAMATION)
return
# Now make sure this parameters file isn't already in our database
if (platform.system() == "Windows"):
paramsfile = "params\\" + code + ".params"
paramslist = glob.glob("params\\*.params")
else:
paramsfile = "params/" + code + ".params"
paramslist = glob.glob("params/*.params")
if (paramsfile in paramslist):
dlg = wx.MessageDialog(self, "There is already a parameters file for this code in the database. Do you want to overwrite the previous entry?", "Duplicate Parameters", wx.YES_NO | wx.ICON_EXCLAMATION | wx.CENTRE)
if (dlg.ShowModal() == wx.ID_NO):
dlg.Destroy()
return
dlg.Destroy()
logInfo("Saved params file with the code " + self.txtCode.GetValue().strip().upper())
if (self.paramsType == "Polymer"):
logInfo("The N-terminus was " + Nterm + " and the C-terminus was " + Cterm)
# Now we have to read the LG.params file and replace the user uploaded data
for (origfile, cen_fa) in [("LG.fa.params", "fa"), ("LG.cen.params", "cen")]:
f = open(origfile, "r")
f2 = open(paramsfile + "." + cen_fa + ".params", "w")
wroteTermini = False
for aline in f:
if (aline[0:4] == "NAME"):
f2.write("NAME " + code + "\n")
elif (aline[0:9] == "IO_STRING"):
f2.write("IO_STRING " + code + " Z\n")
elif (aline[0:4] == "TYPE"):
f2.write("TYPE " + self.paramsType.upper() + "\n")
elif (aline[0:4] == "ATOM"):
# Now we have to find the type from our graph
thisatomname = aline[5:9].strip()
for r in range(0, self.grdParamsAtoms.NumberRows):
if (thisatomname == self.grdParamsAtoms.GetRowLabelValue(r).strip()):
thisatomtype = self.grdParamsAtoms.GetCellValue(r, 0).strip()
break
# Get the right amount of whitespace on all sides
if (len(thisatomtype) == 1):
thisatomtype = thisatomtype + " "
elif (len(thisatomtype) == 2):
thisatomtype = thisatomtype + " "
elif (len(thisatomtype) == 3):
thisatomtype = thisatomtype + " "
# Replace the old entry
aline = aline[0:10] + thisatomtype + aline[14:]
f2.write(aline)
elif (aline[0:4] == "BOND" and not(wroteTermini) and self.paramsType == "Polymer"):
f2.write("LOWER_CONNECT " + Nterm + "\n")
f2.write("UPPER_CONNECT " + Cterm + "\n")
f2.write(aline)
wroteTermini = True
else:
f2.write(aline)
f.close()
f2.close()
# Delete the LG.params file
os.remove("LG.fa.params")
os.remove("LG.cen.params")
wx.MessageBox("Your parameters file was created successfully! InteractiveROSETTA will now recognize " + self.txtCode.GetValue() + " entries.", "Params Creation Successful", wx.OK|wx.ICON_EXCLAMATION)
self.grdParamsAtoms.ClearGrid()
self.atomMenu.Clear()
self.typeMenu.Clear()
self.btnAdd.Disable()
self.removeMenu.Append(self.selectedType)
self.cmd.remove("params")
def removeParams(self, event):
# Take the indicated parameters file out of the database
paramsToRemove = self.removeMenu.GetStringSelection().strip()
if (len(paramsToRemove) == 0):
return
dlg = wx.MessageDialog(self, "This operation will remove " + paramsToRemove + " from the database. Are you sure you want to proceed?", "Parameters Removal", wx.YES_NO | wx.ICON_EXCLAMATION | wx.CENTRE)
if (dlg.ShowModal() == wx.ID_NO):
dlg.Destroy()
return
dlg.Destroy()
goToSandbox("params")
try:
os.remove(paramsToRemove + ".fa.params")
except:
pass
goToSandbox()
self.removeMenu.Delete(self.removeMenu.GetItems().index(paramsToRemove))
dlg = wx.MessageDialog(self, "If any loaded models contained " + paramsToRemove + ", you need to unload and reload them to prevent unexpected behavior.", "Parameters Removal", wx.OK | wx.ICON_EXCLAMATION | wx.CENTRE)
dlg.ShowModal()
dlg.Destroy() | schenc3/InteractiveROSETTA | InteractiveROSETTA/scripts/residuecreator.py | Python | gpl-2.0 | 29,978 | [
"PyMOL"
] | ac8b96be09b23cecb6f4e12d74ed454f5548ad4740afe24497908395097ebb96 |
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pytest
from hyperspy._signals.signal1d import Signal1D
from hyperspy.component import Component, Parameter
from hyperspy.components1d import Gaussian, Lorentzian, ScalableFixedPattern
def remove_empty_numpy_strings(dic):
    """Recursively replace empty numpy byte-strings in *dic* with plain ``''``.

    Walks nested dictionaries and lists in place.  ``np.bytes_`` is used
    instead of the old ``np.string_`` alias, which was removed in NumPy 2.0.

    BUGFIX: the previous implementation rebound the loop variable for list
    elements (``vv = ''``, a no-op that never touched the list) and did
    ``del dic[k]`` followed by ``dic[k] = ''`` while iterating the dict (the
    ``del`` was pointless and mutating during iteration is fragile).
    """
    for k, v in dic.items():
        if isinstance(v, dict):
            remove_empty_numpy_strings(v)
        elif isinstance(v, list):
            for i, vv in enumerate(v):
                if isinstance(vv, dict):
                    remove_empty_numpy_strings(vv)
                elif isinstance(vv, np.bytes_) and len(vv) == 0:
                    # Assign through the index so the list is actually updated
                    v[i] = ''
        elif isinstance(v, np.bytes_) and len(v) == 0:
            # Re-assigning an existing key is safe during iteration
            dic[k] = ''
class DummyAxesManager:
    """Minimal stand-in for hyperspy's AxesManager.

    Provides just enough attributes for Parameter/Component map-array
    creation in the tests below.
    """
    navigation_shape = [1, ]
    navigation_size = 2
    indices = ()
    @property
    def _navigation_shape_in_array(self):
        # Array (row-major) order is the reverse of the navigation order
        return self.navigation_shape[::-1]
class TestParameterDictionary:
    """Round-trip a hyperspy Parameter through as_dictionary/_load_dictionary."""
    def setup_method(self, method):
        # Build a parameter with twin functions, a stored map value, std and
        # bound flags so every serialized field gets exercised
        self.par = Parameter()
        self.par.name = 'asd'
        self.par._id_name = 'newone'
        self.par.twin_function_expr = "x * x"
        self.par.twin_inverse_function_expr = "sqrt(x)"
        self.par._axes_manager = DummyAxesManager()
        self.par._create_array()
        self.par.value = 1
        self.par.std = 0.1
        self.par.store_current_value_in_array()
        self.par.ext_bounded = False
        self.par.ext_force_positive = False
    def test_to_dictionary(self):
        # Every serialized field should mirror the live Parameter's state
        d = self.par.as_dictionary()
        assert d['name'] == self.par.name
        assert d['_id_name'] == self.par._id_name
        np.testing.assert_equal(d['map']['values'][0], 1)
        np.testing.assert_equal(d['map']['std'][0], 0.1)
        assert d['map']['is_set'][0]
        np.testing.assert_equal(d['value'], self.par.value)
        np.testing.assert_equal(d['std'], self.par.std)
        assert d['free'] is self.par.free
        # 'self' stores the python id() so twins can be re-linked on load
        assert d['self'] == id(self.par)
        np.testing.assert_equal(d['_bounds'], self.par._bounds)
        assert d['ext_bounded'] is self.par.ext_bounded
        assert (
            d['ext_force_positive'] is self.par.ext_force_positive)
    def test_load_dictionary(self):
        # Loading into a fresh Parameter (matching _id_name) must restore all
        # state, including the twin functions evaluated at a random point
        d = self.par.as_dictionary()
        p = Parameter()
        p._id_name = 'newone'
        _id = p._load_dictionary(d)
        assert _id == id(self.par)
        assert p.name == self.par.name
        assert p._id_name == self.par._id_name
        np.testing.assert_equal(p.map['values'][0], 1)
        np.testing.assert_equal(p.map['std'][0], 0.1)
        assert p.map['is_set'][0]
        np.testing.assert_equal(p.value, self.par.value)
        np.testing.assert_equal(p.std, self.par.std)
        np.testing.assert_equal(p.free, self.par.free)
        np.testing.assert_equal(p._bounds, self.par._bounds)
        rn = np.random.random()
        np.testing.assert_equal(
            p.twin_function(rn),
            self.par.twin_function(rn))
        np.testing.assert_equal(
            p.twin_inverse_function(rn),
            self.par.twin_inverse_function(rn))
    def test_invalid_name(self):
        # Loading into a Parameter whose _id_name does not match must raise
        d = self.par.as_dictionary()
        d['_id_name'] = 'otherone'
        p = Parameter()
        p._id_name = 'newone'
        with pytest.raises(ValueError):
            _ = p._load_dictionary(d)
class TestComponentDictionary:
    """Round-trip a hyperspy Component (with two parameters) through serialization."""
    def setup_method(self, method):
        # A generic Component with two parameters holding stored map values
        self.parameter_names = ['par1', 'par2']
        self.comp = Component(self.parameter_names)
        self.comp.name = 'newname!'
        self.comp._id_name = 'dummy names yay!'
        self.comp._axes_manager = DummyAxesManager()
        self.comp._create_arrays()
        self.comp.par1.value = 2.
        self.comp.par2.value = 5.
        self.comp.par1.std = 0.2
        self.comp.par2.std = 0.5
        self.comp.store_current_parameters_in_map()
    def test_to_dictionary(self):
        d = self.comp.as_dictionary()
        c = self.comp
        assert c.name == d['name']
        assert c._id_name == d['_id_name']
        # Default: not multidimensional, active, and no per-pixel active array
        assert not d['active_is_multidimensional']
        assert d['active']
        assert d['_active_array'] is None
        for ip, p in enumerate(c.parameters):
            assert p.as_dictionary() == d['parameters'][ip]
        # Once multidimensional, the active array must be serialized too
        c.active_is_multidimensional = True
        d1 = c.as_dictionary()
        assert d1['active_is_multidimensional']
        np.testing.assert_array_equal(d1['_active_array'], c._active_array)
    def test_load_dictionary(self):
        # Serialize with fullcopy=True and reload into a fresh component with
        # a matching _id_name; parameters (incl. twin functions) must survive
        c = self.comp
        c.par1.twin_function_expr = "x + 2"
        c.par2.twin_function_expr = "x - 2"
        d = c.as_dictionary(True)
        n = Component(self.parameter_names)
        n._id_name = 'dummy names yay!'
        _ = n._load_dictionary(d)
        assert c.name == n.name
        assert c.active == n.active
        assert (
            c.active_is_multidimensional ==
            n.active_is_multidimensional)
        for pn, pc in zip(n.parameters, c.parameters):
            rn = np.random.random()
            assert pn.twin_function(rn) == pc.twin_function(rn)
            assert (
                pn.twin_inverse_function(rn) ==
                pc.twin_inverse_function(rn))
            # Compare dictionaries minus the 'self' id, which is unique per
            # live object and therefore expected to differ
            dn = pn.as_dictionary()
            del dn['self']
            dc = pc.as_dictionary()
            del dc['self']
            print(list(dn.keys()))
            print(list(dc.keys()))
            assert dn == dc
    def test_invalid_component_name(self):
        # A fresh Component keeps its default _id_name, so loading must fail
        c = self.comp
        d = c.as_dictionary()
        n = Component(self.parameter_names)
        with pytest.raises(ValueError):
            _ = n._load_dictionary(d)
    def test_invalid_parameter_name(self):
        # Matching component _id_name but mismatched parameter names -> error
        c = self.comp
        d = c.as_dictionary()
        n = Component([a + 's' for a in self.parameter_names])
        n._id_name = 'dummy names yay!'
        with pytest.raises(ValueError):
            _ = n._load_dictionary(d)
class TestModelDictionary:
    """Round-trip a fitted Model (components, twins, low-loss) through serialization."""
    def setup_method(self, method):
        # A small 1D signal, a model with two Gaussians and a fixed pattern,
        # a twin link between the Gaussian amplitudes, and a low-loss signal
        s = Signal1D(np.array([1.0, 2, 4, 7, 12, 7, 4, 2, 1]))
        m = s.create_model()
        m.low_loss = (s + 3.0).deepcopy()
        self.model = m
        self.s = s
        m.append(Gaussian())
        m.append(Gaussian())
        m.append(ScalableFixedPattern(s * 0.3))
        m[0].A.twin = m[1].A
        m.fit()
    def test_to_dictionary(self):
        m = self.model
        d = m.as_dictionary()
        print(d['low_loss'])
        np.testing.assert_allclose(m.low_loss.data, d['low_loss']['data'])
        np.testing.assert_allclose(m.chisq.data, d['chisq.data'])
        np.testing.assert_allclose(m.dof.data, d['dof.data'])
        np.testing.assert_equal(
            d['free_parameters_boundaries'],
            m.free_parameters_boundaries)
        assert d['convolved'] is m.convolved
        for num, c in enumerate(m):
            tmp = c.as_dictionary()
            remove_empty_numpy_strings(tmp)
            assert d['components'][num]['name'] == tmp['name']
            assert d['components'][num]['_id_name'] == tmp['_id_name']
        # The fixed-pattern component serializes its own signal
        np.testing.assert_equal(d['components'][-1]['signal1D'],
                                (m.signal * 0.3)._to_dictionary())
    def test_load_dictionary(self):
        # Loading must replace/restore components (the pre-existing Lorentzian
        # notwithstanding), fit statistics, low-loss and the twin link
        d = self.model.as_dictionary()
        mn = self.s.create_model()
        mn.append(Lorentzian())
        mn._load_dictionary(d)
        mo = self.model
        # assert_true(np.allclose(mo.signal1D.data, mn.signal1D.data))
        np.testing.assert_allclose(mo.chisq.data, mn.chisq.data)
        np.testing.assert_allclose(mo.dof.data, mn.dof.data)
        np.testing.assert_allclose(mn.low_loss.data, mo.low_loss.data)
        np.testing.assert_equal(
            mn.free_parameters_boundaries,
            mo.free_parameters_boundaries)
        assert mn.convolved is mo.convolved
        for i in range(len(mn)):
            assert mn[i]._id_name == mo[i]._id_name
            for po, pn in zip(mo[i].parameters, mn[i].parameters):
                np.testing.assert_allclose(po.map['values'], pn.map['values'])
                np.testing.assert_allclose(po.map['is_set'], pn.map['is_set'])
        assert mn[0].A.twin is mn[1].A
| erh3cq/hyperspy | hyperspy/tests/model/test_model_as_dictionary.py | Python | gpl-3.0 | 8,879 | [
"Gaussian"
] | 776218fa0dfcb964524a5ac9616872fdf92c6454812ab6333022032e78e1015f |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from pyspark import since, keyword_only
from pyspark.ml.param.shared import *
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
__all__ = ['AFTSurvivalRegression', 'AFTSurvivalRegressionModel',
'DecisionTreeRegressor', 'DecisionTreeRegressionModel',
'GBTRegressor', 'GBTRegressionModel',
'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel',
'GeneralizedLinearRegressionSummary', 'GeneralizedLinearRegressionTrainingSummary',
'IsotonicRegression', 'IsotonicRegressionModel',
'LinearRegression', 'LinearRegressionModel',
'LinearRegressionSummary', 'LinearRegressionTrainingSummary',
'RandomForestRegressor', 'RandomForestRegressionModel']
@inherit_doc
class LinearRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
                       HasRegParam, HasTol, HasElasticNetParam, HasFitIntercept,
                       HasStandardization, HasSolver, HasWeightCol, HasAggregationDepth,
                       JavaMLWritable, JavaMLReadable):
    """
    Linear regression.
    The learning objective is to minimize the squared error, with regularization.
    The specific squared error loss function used is: L = 1/2n ||A coefficients - y||^2^
    This supports multiple types of regularization:
    * none (a.k.a. ordinary least squares)
    * L2 (ridge regression)
    * L1 (Lasso)
    * L2 + L1 (elastic net)
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, 2.0, Vectors.dense(1.0)),
    ...     (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
    >>> lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight")
    >>> model = lr.fit(df)
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> abs(model.transform(test0).head().prediction - (-1.0)) < 0.001
    True
    >>> abs(model.coefficients[0] - 1.0) < 0.001
    True
    >>> abs(model.intercept - 0.0) < 0.001
    True
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> abs(model.transform(test1).head().prediction - 1.0) < 0.001
    True
    >>> lr.setParams("vector")
    Traceback (most recent call last):
        ...
    TypeError: Method setParams forces keyword arguments.
    >>> lr_path = temp_path + "/lr"
    >>> lr.save(lr_path)
    >>> lr2 = LinearRegression.load(lr_path)
    >>> lr2.getMaxIter()
    5
    >>> model_path = temp_path + "/lr_model"
    >>> model.save(model_path)
    >>> model2 = LinearRegressionModel.load(model_path)
    >>> model.coefficients[0] == model2.coefficients[0]
    True
    >>> model.intercept == model2.intercept
    True
    >>> model.numFeatures
    1
    .. versionadded:: 1.4.0
    """

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                 standardization=True, solver="auto", weightCol=None, aggregationDepth=2):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                 standardization=True, solver="auto", weightCol=None, aggregationDepth=2)
        """
        super(LinearRegression, self).__init__()
        # Fitting is delegated to the Scala implementation on the JVM side.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.LinearRegression", self.uid)
        self._setDefault(maxIter=100, regParam=0.0, tol=1e-6)
        # @keyword_only stashes the caller's keyword arguments in _input_kwargs,
        # so only explicitly-passed params are set (defaults stay "default").
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                  standardization=True, solver="auto", weightCol=None, aggregationDepth=2):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                  standardization=True, solver="auto", weightCol=None, aggregationDepth=2)
        Sets params for linear regression.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return LinearRegressionModel(java_model)
class LinearRegressionModel(JavaModel, JavaPredictionModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by :class:`LinearRegression`.
    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")

    @property
    @since("1.4.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")

    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (e.g. residuals, mse, r-squared ) of model on
        training set. An exception is thrown if
        `trainingSummary is None`.
        """
        # Only models produced by fit() carry a training summary; loaded
        # models do not, hence the hasSummary guard.
        if self.hasSummary:
            java_lrt_summary = self._call_java("summary")
            return LinearRegressionTrainingSummary(java_lrt_summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    @property
    @since("2.0.0")
    def hasSummary(self):
        """
        Indicates whether a training summary exists for this model
        instance.
        """
        return self._call_java("hasSummary")

    @since("2.0.0")
    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.
        :param dataset:
          Test dataset to evaluate model on, where dataset is an
          instance of :py:class:`pyspark.sql.DataFrame`
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        java_lr_summary = self._call_java("evaluate", dataset)
        return LinearRegressionSummary(java_lr_summary)
class LinearRegressionSummary(JavaWrapper):
    """
    .. note:: Experimental
    Linear regression results evaluated on a dataset.
    .. versionadded:: 2.0.0
    """
    # Every property below is a thin read-only accessor that forwards to the
    # corresponding field of the JVM-side LinearRegressionSummary object.

    @property
    @since("2.0.0")
    def predictions(self):
        """
        Dataframe outputted by the model's `transform` method.
        """
        return self._call_java("predictions")

    @property
    @since("2.0.0")
    def predictionCol(self):
        """
        Field in "predictions" which gives the predicted value of
        the label at each instance.
        """
        return self._call_java("predictionCol")

    @property
    @since("2.0.0")
    def labelCol(self):
        """
        Field in "predictions" which gives the true label of each
        instance.
        """
        return self._call_java("labelCol")

    @property
    @since("2.0.0")
    def featuresCol(self):
        """
        Field in "predictions" which gives the features of each instance
        as a vector.
        """
        return self._call_java("featuresCol")

    @property
    @since("2.0.0")
    def explainedVariance(self):
        """
        Returns the explained variance regression score.
        explainedVariance = 1 - variance(y - \hat{y}) / variance(y)
        .. seealso:: `Wikipedia explain variation \
            <http://en.wikipedia.org/wiki/Explained_variation>`_
        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("explainedVariance")

    @property
    @since("2.0.0")
    def meanAbsoluteError(self):
        """
        Returns the mean absolute error, which is a risk function
        corresponding to the expected value of the absolute error
        loss or l1-norm loss.
        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("meanAbsoluteError")

    @property
    @since("2.0.0")
    def meanSquaredError(self):
        """
        Returns the mean squared error, which is a risk function
        corresponding to the expected value of the squared error
        loss or quadratic loss.
        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("meanSquaredError")

    @property
    @since("2.0.0")
    def rootMeanSquaredError(self):
        """
        Returns the root mean squared error, which is defined as the
        square root of the mean squared error.
        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("rootMeanSquaredError")

    @property
    @since("2.0.0")
    def r2(self):
        """
        Returns R^2^, the coefficient of determination.
        .. seealso:: `Wikipedia coefficient of determination \
            <http://en.wikipedia.org/wiki/Coefficient_of_determination>`
        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("r2")

    @property
    @since("2.0.0")
    def residuals(self):
        """
        Residuals (label - predicted value)
        """
        return self._call_java("residuals")

    @property
    @since("2.0.0")
    def numInstances(self):
        """
        Number of instances in DataFrame predictions
        """
        return self._call_java("numInstances")

    @property
    @since("2.0.0")
    def devianceResiduals(self):
        """
        The weighted residuals, the usual residuals rescaled by the
        square root of the instance weights.
        """
        return self._call_java("devianceResiduals")

    @property
    @since("2.0.0")
    def coefficientStandardErrors(self):
        """
        Standard error of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.
        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("coefficientStandardErrors")

    @property
    @since("2.0.0")
    def tValues(self):
        """
        T-statistic of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.
        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("tValues")

    @property
    @since("2.0.0")
    def pValues(self):
        """
        Two-sided p-value of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.
        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("pValues")
@inherit_doc
class LinearRegressionTrainingSummary(LinearRegressionSummary):
    """
    .. note:: Experimental
    Linear regression training results. Currently, the training summary ignores the
    training weights except for the objective trace.
    .. versionadded:: 2.0.0
    """
    # Extends LinearRegressionSummary with training-only diagnostics that are
    # produced while the optimizer runs (iteration count and loss trace).

    @property
    @since("2.0.0")
    def objectiveHistory(self):
        """
        Objective function (scaled loss + regularization) at each
        iteration.
        This value is only available when using the "l-bfgs" solver.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("objectiveHistory")

    @property
    @since("2.0.0")
    def totalIterations(self):
        """
        Number of training iterations until termination.
        This value is only available when using the "l-bfgs" solver.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("totalIterations")
@inherit_doc
class IsotonicRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
                         HasWeightCol, JavaMLWritable, JavaMLReadable):
    """
    Currently implemented using parallelized pool adjacent violators algorithm.
    Only univariate (single feature) algorithm supported.
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> ir = IsotonicRegression()
    >>> model = ir.fit(df)
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> model.boundaries
    DenseVector([0.0, 1.0])
    >>> ir_path = temp_path + "/ir"
    >>> ir.save(ir_path)
    >>> ir2 = IsotonicRegression.load(ir_path)
    >>> ir2.getIsotonic()
    True
    >>> model_path = temp_path + "/ir_model"
    >>> model.save(model_path)
    >>> model2 = IsotonicRegressionModel.load(model_path)
    >>> model.boundaries == model2.boundaries
    True
    >>> model.predictions == model2.predictions
    True
    .. versionadded:: 1.6.0
    """

    # Params are declared on a dummy parent and copied onto instances by the
    # Params machinery; typeConverter validates/coerces user-supplied values.
    isotonic = \
        Param(Params._dummy(), "isotonic",
              "whether the output sequence should be isotonic/increasing (true) or" +
              "antitonic/decreasing (false).", typeConverter=TypeConverters.toBoolean)
    featureIndex = \
        Param(Params._dummy(), "featureIndex",
              "The index of the feature if featuresCol is a vector column, no effect otherwise.",
              typeConverter=TypeConverters.toInt)

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 weightCol=None, isotonic=True, featureIndex=0):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 weightCol=None, isotonic=True, featureIndex=0):
        """
        super(IsotonicRegression, self).__init__()
        # Fitting is delegated to the Scala implementation on the JVM side.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.IsotonicRegression", self.uid)
        self._setDefault(isotonic=True, featureIndex=0)
        # @keyword_only stashes the caller's keyword arguments in _input_kwargs.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  weightCol=None, isotonic=True, featureIndex=0):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  weightCol=None, isotonic=True, featureIndex=0):
        Set the params for IsotonicRegression.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return IsotonicRegressionModel(java_model)

    def setIsotonic(self, value):
        """
        Sets the value of :py:attr:`isotonic`.
        """
        return self._set(isotonic=value)

    def getIsotonic(self):
        """
        Gets the value of isotonic or its default value.
        """
        return self.getOrDefault(self.isotonic)

    def setFeatureIndex(self, value):
        """
        Sets the value of :py:attr:`featureIndex`.
        """
        return self._set(featureIndex=value)

    def getFeatureIndex(self):
        """
        Gets the value of featureIndex or its default value.
        """
        return self.getOrDefault(self.featureIndex)
class IsotonicRegressionModel(JavaModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by :class:`IsotonicRegression`.
    .. versionadded:: 1.6.0
    """

    @property
    @since("1.6.0")
    def boundaries(self):
        """
        Boundaries in increasing order for which predictions are known.
        """
        return self._call_java("boundaries")

    @property
    @since("1.6.0")
    def predictions(self):
        """
        Predictions associated with the boundaries at the same index, monotone because of isotonic
        regression.
        """
        return self._call_java("predictions")
class TreeEnsembleParams(DecisionTreeParams):
    """
    Mixin for Decision Tree-based ensemble algorithms parameters.
    """

    subsamplingRate = Param(Params._dummy(), "subsamplingRate", "Fraction of the training data " +
                            "used for learning each decision tree, in range (0, 1].",
                            typeConverter=TypeConverters.toFloat)

    def __init__(self):
        super(TreeEnsembleParams, self).__init__()

    @since("1.4.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.
        """
        return self._set(subsamplingRate=value)

    @since("1.4.0")
    def getSubsamplingRate(self):
        """
        Gets the value of subsamplingRate or its default value.
        """
        return self.getOrDefault(self.subsamplingRate)
class TreeRegressorParams(Params):
    """
    Private class to track supported impurity measures.
    """

    # For regression trees, "variance" is currently the only supported impurity.
    supportedImpurities = ["variance"]
    impurity = Param(Params._dummy(), "impurity",
                     "Criterion used for information gain calculation (case-insensitive). " +
                     "Supported options: " +
                     ", ".join(supportedImpurities), typeConverter=TypeConverters.toString)

    def __init__(self):
        super(TreeRegressorParams, self).__init__()

    @since("1.4.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)

    @since("1.4.0")
    def getImpurity(self):
        """
        Gets the value of impurity or its default value.
        """
        return self.getOrDefault(self.impurity)
class RandomForestParams(TreeEnsembleParams):
    """
    Private class to track supported random forest parameters.
    """

    supportedFeatureSubsetStrategies = ["auto", "all", "onethird", "sqrt", "log2"]
    numTrees = Param(Params._dummy(), "numTrees", "Number of trees to train (>= 1).",
                     typeConverter=TypeConverters.toInt)
    featureSubsetStrategy = \
        Param(Params._dummy(), "featureSubsetStrategy",
              "The number of features to consider for splits at each tree node. Supported " +
              "options: " + ", ".join(supportedFeatureSubsetStrategies) + ", (0.0-1.0], [1-n].",
              typeConverter=TypeConverters.toString)

    def __init__(self):
        super(RandomForestParams, self).__init__()

    @since("1.4.0")
    def setNumTrees(self, value):
        """
        Sets the value of :py:attr:`numTrees`.
        """
        return self._set(numTrees=value)

    @since("1.4.0")
    def getNumTrees(self):
        """
        Gets the value of numTrees or its default value.
        """
        return self.getOrDefault(self.numTrees)

    @since("1.4.0")
    def setFeatureSubsetStrategy(self, value):
        """
        Sets the value of :py:attr:`featureSubsetStrategy`.
        """
        return self._set(featureSubsetStrategy=value)

    @since("1.4.0")
    def getFeatureSubsetStrategy(self, ):
        """
        Gets the value of featureSubsetStrategy or its default value.
        """
        return self.getOrDefault(self.featureSubsetStrategy)
class GBTParams(TreeEnsembleParams):
    """
    Private class to track supported GBT params.
    """
    # Loss functions GBT regression can minimize (referenced by GBTRegressor.lossType).
    supportedLossTypes = ["squared", "absolute"]
@inherit_doc
class DecisionTreeRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
                            DecisionTreeParams, TreeRegressorParams, HasCheckpointInterval,
                            HasSeed, JavaMLWritable, JavaMLReadable, HasVarianceCol):
    """
    `Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> dt = DecisionTreeRegressor(maxDepth=2, varianceCol="variance")
    >>> model = dt.fit(df)
    >>> model.depth
    1
    >>> model.numNodes
    3
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> model.numFeatures
    1
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> dtr_path = temp_path + "/dtr"
    >>> dt.save(dtr_path)
    >>> dt2 = DecisionTreeRegressor.load(dtr_path)
    >>> dt2.getMaxDepth()
    2
    >>> model_path = temp_path + "/dtr_model"
    >>> model.save(model_path)
    >>> model2 = DecisionTreeRegressionModel.load(model_path)
    >>> model.numNodes == model2.numNodes
    True
    >>> model.depth == model2.depth
    True
    >>> model.transform(test1).head().variance
    0.0
    .. versionadded:: 1.4.0
    """

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance",
                 seed=None, varianceCol=None):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                 impurity="variance", seed=None, varianceCol=None)
        """
        super(DecisionTreeRegressor, self).__init__()
        # Fitting is delegated to the Scala implementation on the JVM side.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.DecisionTreeRegressor", self.uid)
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         impurity="variance")
        # @keyword_only stashes the caller's keyword arguments in _input_kwargs.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  impurity="variance", seed=None, varianceCol=None):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                  impurity="variance", seed=None, varianceCol=None)
        Sets params for the DecisionTreeRegressor.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return DecisionTreeRegressionModel(java_model)
@inherit_doc
class DecisionTreeModel(JavaModel, JavaPredictionModel):
    """
    Abstraction for Decision Tree models.
    .. versionadded:: 1.5.0
    """

    @property
    @since("1.5.0")
    def numNodes(self):
        """Return number of nodes of the decision tree."""
        return self._call_java("numNodes")

    @property
    @since("1.5.0")
    def depth(self):
        """Return depth of the decision tree."""
        return self._call_java("depth")

    @property
    @since("2.0.0")
    def toDebugString(self):
        """Full description of model."""
        return self._call_java("toDebugString")

    def __repr__(self):
        # Delegate to the JVM object's toString for a concise description.
        return self._call_java("toString")
@inherit_doc
class TreeEnsembleModel(JavaModel):
    """
    (private abstraction)
    Represents a tree ensemble model.
    """

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        return [DecisionTreeModel(m) for m in list(self._call_java("trees"))]

    @property
    @since("2.0.0")
    def getNumTrees(self):
        """Number of trees in ensemble."""
        return self._call_java("getNumTrees")

    @property
    @since("1.5.0")
    def treeWeights(self):
        """Return the weights for each tree"""
        return list(self._call_java("javaTreeWeights"))

    @property
    @since("2.0.0")
    def totalNumNodes(self):
        """Total number of nodes, summed over all trees in the ensemble."""
        return self._call_java("totalNumNodes")

    @property
    @since("2.0.0")
    def toDebugString(self):
        """Full description of model."""
        return self._call_java("toDebugString")

    def __repr__(self):
        # Delegate to the JVM object's toString for a concise description.
        return self._call_java("toString")
@inherit_doc
class DecisionTreeRegressionModel(DecisionTreeModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by :class:`DecisionTreeRegressor`.
    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.
        This generalizes the idea of "Gini" importance to other losses,
        following the explanation of Gini importance from "Random Forests" documentation
        by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.
        This feature importance is calculated as follows:
          - importance(feature j) = sum (over nodes which split on feature j) of the gain,
            where gain is scaled by the number of instances passing through node
          - Normalize importances for tree to sum to 1.
        .. note:: Feature importance for single decision trees can have high variance due to
              correlated predictor variables. Consider using a :py:class:`RandomForestRegressor`
              to determine feature importance instead.
        """
        return self._call_java("featureImportances")
@inherit_doc
class RandomForestRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasSeed,
                            RandomForestParams, TreeRegressorParams, HasCheckpointInterval,
                            JavaMLWritable, JavaMLReadable):
    """
    `Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.
    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> rf = RandomForestRegressor(numTrees=2, maxDepth=2, seed=42)
    >>> model = rf.fit(df)
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> allclose(model.treeWeights, [1.0, 1.0])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> model.numFeatures
    1
    >>> model.trees
    [DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]
    >>> model.getNumTrees
    2
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    0.5
    >>> rfr_path = temp_path + "/rfr"
    >>> rf.save(rfr_path)
    >>> rf2 = RandomForestRegressor.load(rfr_path)
    >>> rf2.getNumTrees()
    2
    >>> model_path = temp_path + "/rfr_model"
    >>> model.save(model_path)
    >>> model2 = RandomForestRegressionModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True
    .. versionadded:: 1.4.0
    """

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                 impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
                 featureSubsetStrategy="auto"):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                 impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
                 featureSubsetStrategy="auto")
        """
        super(RandomForestRegressor, self).__init__()
        # Fitting is delegated to the Scala implementation on the JVM side.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.RandomForestRegressor", self.uid)
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         impurity="variance", subsamplingRate=1.0, numTrees=20,
                         featureSubsetStrategy="auto")
        # @keyword_only stashes the caller's keyword arguments in _input_kwargs.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
                  featureSubsetStrategy="auto"):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                  impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
                  featureSubsetStrategy="auto")
        Sets params for random forest regression.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return RandomForestRegressionModel(java_model)
class RandomForestRegressionModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable,
                                  JavaMLReadable):
    """
    Model fitted by :class:`RandomForestRegressor`.
    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        # Overrides TreeEnsembleModel.trees to wrap each member in the
        # regression-specific model class.
        return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]

    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across all trees in the ensemble
        The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.
        .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
        """
        return self._call_java("featureImportances")
@inherit_doc
class GBTRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
                   GBTParams, HasCheckpointInterval, HasStepSize, HasSeed, JavaMLWritable,
                   JavaMLReadable, TreeRegressorParams):
    """
    `Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.
    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> gbt = GBTRegressor(maxIter=5, maxDepth=2, seed=42)
    >>> print(gbt.getImpurity())
    variance
    >>> model = gbt.fit(df)
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> model.numFeatures
    1
    >>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> gbtr_path = temp_path + "/gbtr"
    >>> gbt.save(gbtr_path)
    >>> gbt2 = GBTRegressor.load(gbtr_path)
    >>> gbt2.getMaxDepth()
    2
    >>> model_path = temp_path + "/gbtr_model"
    >>> model.save(model_path)
    >>> model2 = GBTRegressionModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True
    >>> model.treeWeights == model2.treeWeights
    True
    >>> model.trees
    [DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]
    .. versionadded:: 1.4.0
    """

    lossType = Param(Params._dummy(), "lossType",
                     "Loss function which GBT tries to minimize (case-insensitive). " +
                     "Supported options: " + ", ".join(GBTParams.supportedLossTypes),
                     typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                 checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
                 impurity="variance"):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
                 checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
                 impurity="variance")
        """
        super(GBTRegressor, self).__init__()
        # Fitting is delegated to the Scala implementation on the JVM side.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.regression.GBTRegressor", self.uid)
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                         checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1,
                         impurity="variance")
        # @keyword_only stashes the caller's keyword arguments in _input_kwargs.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                  checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
                  impurity="variance"):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
                  checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
                  impurity="variance")
        Sets params for Gradient Boosted Tree Regression.
        """
        # BUGFIX: the parameter was previously misspelled `impuriy`, which made
        # gbt.setParams(impurity=...) raise TypeError under @keyword_only (the
        # docstring above always said `impurity`, matching __init__).
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return GBTRegressionModel(java_model)

    @since("1.4.0")
    def setLossType(self, value):
        """
        Sets the value of :py:attr:`lossType`.
        """
        return self._set(lossType=value)

    @since("1.4.0")
    def getLossType(self):
        """
        Gets the value of lossType or its default value.
        """
        return self.getOrDefault(self.lossType)
class GBTRegressionModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by :class:`GBTRegressor`.
    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across all trees in the ensemble
        The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.
        .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
        """
        return self._call_java("featureImportances")

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        # Overrides TreeEnsembleModel.trees to wrap each member in the
        # regression-specific model class.
        return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
@inherit_doc
class AFTSurvivalRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
                            HasFitIntercept, HasMaxIter, HasTol, HasAggregationDepth,
                            JavaMLWritable, JavaMLReadable):
    """
    .. note:: Experimental

    Accelerated Failure Time (AFT) Model Survival Regression

    Fit a parametric AFT survival regression model based on the Weibull distribution
    of the survival time.

    .. seealso:: `AFT Model <https://en.wikipedia.org/wiki/Accelerated_failure_time_model>`_

    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0), 1.0),
    ...     (0.0, Vectors.sparse(1, [], []), 0.0)], ["label", "features", "censor"])
    >>> aftsr = AFTSurvivalRegression()
    >>> model = aftsr.fit(df)
    >>> model.predict(Vectors.dense(6.3))
    1.0
    >>> model.predictQuantiles(Vectors.dense(6.3))
    DenseVector([0.0101, 0.0513, 0.1054, 0.2877, 0.6931, 1.3863, 2.3026, 2.9957, 4.6052])
    >>> model.transform(df).show()
    +-----+---------+------+----------+
    |label| features|censor|prediction|
    +-----+---------+------+----------+
    |  1.0|    [1.0]|   1.0|       1.0|
    |  0.0|(1,[],[])|   0.0|       1.0|
    +-----+---------+------+----------+
    ...
    >>> aftsr_path = temp_path + "/aftsr"
    >>> aftsr.save(aftsr_path)
    >>> aftsr2 = AFTSurvivalRegression.load(aftsr_path)
    >>> aftsr2.getMaxIter()
    100
    >>> model_path = temp_path + "/aftsr_model"
    >>> model.save(model_path)
    >>> model2 = AFTSurvivalRegressionModel.load(model_path)
    >>> model.coefficients == model2.coefficients
    True
    >>> model.intercept == model2.intercept
    True
    >>> model.scale == model2.scale
    True

    .. versionadded:: 1.6.0
    """

    censorCol = Param(Params._dummy(), "censorCol",
                      "censor column name. The value of this column could be 0 or 1. " +
                      "If the value is 1, it means the event has occurred i.e. " +
                      "uncensored; otherwise censored.", typeConverter=TypeConverters.toString)
    quantileProbabilities = \
        Param(Params._dummy(), "quantileProbabilities",
              "quantile probabilities array. Values of the quantile probabilities array " +
              "should be in the range (0, 1) and the array should be non-empty.",
              typeConverter=TypeConverters.toListFloat)
    quantilesCol = Param(Params._dummy(), "quantilesCol",
                         "quantiles column name. This column will output quantiles of " +
                         "corresponding quantileProbabilities if it is set.",
                         typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
                 quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
                 quantilesCol=None, aggregationDepth=2):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
                 quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
                 quantilesCol=None, aggregationDepth=2)
        """
        # NOTE: the defaults above were previously written list([...]); the
        # list() call around a list literal is redundant and has been dropped.
        # Under @keyword_only the defaults are effectively documentation: only
        # explicitly-passed kwargs reach _input_kwargs and thus _set.
        super(AFTSurvivalRegression, self).__init__()
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.AFTSurvivalRegression", self.uid)
        self._setDefault(censorCol="censor",
                         quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
                         maxIter=100, tol=1E-6)
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.6.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
                  quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
                  quantilesCol=None, aggregationDepth=2):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
                  quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
                  quantilesCol=None, aggregationDepth=2)
        """
        # Docstring fix: the rendered signature previously ended with a stray
        # "):" which broke the Sphinx-rendered call signature.
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model handle in its Python-side counterpart.
        return AFTSurvivalRegressionModel(java_model)

    @since("1.6.0")
    def setCensorCol(self, value):
        """
        Sets the value of :py:attr:`censorCol`.
        """
        return self._set(censorCol=value)

    @since("1.6.0")
    def getCensorCol(self):
        """
        Gets the value of censorCol or its default value.
        """
        return self.getOrDefault(self.censorCol)

    @since("1.6.0")
    def setQuantileProbabilities(self, value):
        """
        Sets the value of :py:attr:`quantileProbabilities`.
        """
        return self._set(quantileProbabilities=value)

    @since("1.6.0")
    def getQuantileProbabilities(self):
        """
        Gets the value of quantileProbabilities or its default value.
        """
        return self.getOrDefault(self.quantileProbabilities)

    @since("1.6.0")
    def setQuantilesCol(self, value):
        """
        Sets the value of :py:attr:`quantilesCol`.
        """
        return self._set(quantilesCol=value)

    @since("1.6.0")
    def getQuantilesCol(self):
        """
        Gets the value of quantilesCol or its default value.
        """
        return self.getOrDefault(self.quantilesCol)
class AFTSurvivalRegressionModel(JavaModel, JavaMLWritable, JavaMLReadable):
    """
    .. note:: Experimental

    Model fitted by :class:`AFTSurvivalRegression`.

    .. versionadded:: 1.6.0
    """

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")

    @property
    @since("1.6.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")

    @property
    @since("1.6.0")
    def scale(self):
        """
        Model scale parameter.
        """
        return self._call_java("scale")

    @since("2.0.0")
    def predictQuantiles(self, features):
        """
        Predicted Quantiles
        """
        # Quantile probabilities come from the estimator's
        # quantileProbabilities param on the JVM side.
        return self._call_java("predictQuantiles", features)

    @since("2.0.0")
    def predict(self, features):
        """
        Predicted value
        """
        return self._call_java("predict", features)
@inherit_doc
class GeneralizedLinearRegression(JavaEstimator, HasLabelCol, HasFeaturesCol, HasPredictionCol,
                                  HasFitIntercept, HasMaxIter, HasTol, HasRegParam, HasWeightCol,
                                  HasSolver, JavaMLWritable, JavaMLReadable):
    """
    .. note:: Experimental

    Generalized Linear Regression.

    Fit a Generalized Linear Model specified by giving a symbolic description of the linear
    predictor (link function) and a description of the error distribution (family). It supports
    "gaussian", "binomial", "poisson" and "gamma" as family. Valid link functions for each family
    is listed below. The first link function of each family is the default one.

    * "gaussian" -> "identity", "log", "inverse"

    * "binomial" -> "logit", "probit", "cloglog"

    * "poisson"  -> "log", "identity", "sqrt"

    * "gamma"    -> "inverse", "identity", "log"

    .. seealso:: `GLM <https://en.wikipedia.org/wiki/Generalized_linear_model>`_

    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(0.0, 0.0)),
    ...     (1.0, Vectors.dense(1.0, 2.0)),
    ...     (2.0, Vectors.dense(0.0, 0.0)),
    ...     (2.0, Vectors.dense(1.0, 1.0)),], ["label", "features"])
    >>> glr = GeneralizedLinearRegression(family="gaussian", link="identity", linkPredictionCol="p")
    >>> model = glr.fit(df)
    >>> transformed = model.transform(df)
    >>> abs(transformed.head().prediction - 1.5) < 0.001
    True
    >>> abs(transformed.head().p - 1.5) < 0.001
    True
    >>> model.coefficients
    DenseVector([1.5..., -1.0...])
    >>> model.numFeatures
    2
    >>> abs(model.intercept - 1.5) < 0.001
    True
    >>> glr_path = temp_path + "/glr"
    >>> glr.save(glr_path)
    >>> glr2 = GeneralizedLinearRegression.load(glr_path)
    >>> glr.getFamily() == glr2.getFamily()
    True
    >>> model_path = temp_path + "/glr_model"
    >>> model.save(model_path)
    >>> model2 = GeneralizedLinearRegressionModel.load(model_path)
    >>> model.intercept == model2.intercept
    True
    >>> model.coefficients[0] == model2.coefficients[0]
    True

    .. versionadded:: 2.0.0
    """

    # Params mirror the Scala-side GeneralizedLinearRegression definitions.
    family = Param(Params._dummy(), "family", "The name of family which is a description of " +
                   "the error distribution to be used in the model. Supported options: " +
                   "gaussian (default), binomial, poisson and gamma.",
                   typeConverter=TypeConverters.toString)
    link = Param(Params._dummy(), "link", "The name of link function which provides the " +
                 "relationship between the linear predictor and the mean of the distribution " +
                 "function. Supported options: identity, log, inverse, logit, probit, cloglog " +
                 "and sqrt.", typeConverter=TypeConverters.toString)
    linkPredictionCol = Param(Params._dummy(), "linkPredictionCol", "link prediction (linear " +
                              "predictor) column name", typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction",
                 family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
                 regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None):
        """
        __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
                 family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
                 regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None)
        """
        super(GeneralizedLinearRegression, self).__init__()
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.GeneralizedLinearRegression", self.uid)
        # "irls" (iteratively reweighted least squares) is the only solver.
        self._setDefault(family="gaussian", maxIter=25, tol=1e-6, regParam=0.0, solver="irls")
        # @keyword_only captured only the explicitly-passed kwargs.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.0.0")
    def setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction",
                  family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
                  regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None):
        """
        setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
                  family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
                  regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None)
        Sets params for generalized linear regression.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model handle in its Python-side counterpart.
        return GeneralizedLinearRegressionModel(java_model)

    @since("2.0.0")
    def setFamily(self, value):
        """
        Sets the value of :py:attr:`family`.
        """
        return self._set(family=value)

    @since("2.0.0")
    def getFamily(self):
        """
        Gets the value of family or its default value.
        """
        return self.getOrDefault(self.family)

    @since("2.0.0")
    def setLinkPredictionCol(self, value):
        """
        Sets the value of :py:attr:`linkPredictionCol`.
        """
        return self._set(linkPredictionCol=value)

    @since("2.0.0")
    def getLinkPredictionCol(self):
        """
        Gets the value of linkPredictionCol or its default value.
        """
        return self.getOrDefault(self.linkPredictionCol)

    @since("2.0.0")
    def setLink(self, value):
        """
        Sets the value of :py:attr:`link`.
        """
        return self._set(link=value)

    @since("2.0.0")
    def getLink(self):
        """
        Gets the value of link or its default value.
        """
        return self.getOrDefault(self.link)
class GeneralizedLinearRegressionModel(JavaModel, JavaPredictionModel, JavaMLWritable,
                                       JavaMLReadable):
    """
    .. note:: Experimental

    Model fitted by :class:`GeneralizedLinearRegression`.

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")

    @property
    @since("2.0.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")

    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (e.g. residuals, deviance, pValues) of model on
        training set. An exception is thrown if
        `trainingSummary is None`.
        """
        # Guard clause: fail fast when no training summary was recorded.
        if not self.hasSummary:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)
        javaSummary = self._call_java("summary")
        return GeneralizedLinearRegressionTrainingSummary(javaSummary)

    @property
    @since("2.0.0")
    def hasSummary(self):
        """
        Indicates whether a training summary exists for this model
        instance.
        """
        return self._call_java("hasSummary")

    @since("2.0.0")
    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.

        :param dataset:
          Test dataset to evaluate model on, where dataset is an
          instance of :py:class:`pyspark.sql.DataFrame`
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        javaSummary = self._call_java("evaluate", dataset)
        return GeneralizedLinearRegressionSummary(javaSummary)
class GeneralizedLinearRegressionSummary(JavaWrapper):
    """
    .. note:: Experimental

    Generalized linear regression results evaluated on a dataset.

    .. versionadded:: 2.0.0
    """

    # All properties below are thin proxies over the JVM-side summary object.

    @property
    @since("2.0.0")
    def predictions(self):
        """
        Predictions output by the model's `transform` method.
        """
        return self._call_java("predictions")

    @property
    @since("2.0.0")
    def predictionCol(self):
        """
        Field in :py:attr:`predictions` which gives the predicted value of each instance.
        This is set to a new column name if the original model's `predictionCol` is not set.
        """
        return self._call_java("predictionCol")

    @property
    @since("2.0.0")
    def rank(self):
        """
        The numeric rank of the fitted linear model.
        """
        return self._call_java("rank")

    @property
    @since("2.0.0")
    def degreesOfFreedom(self):
        """
        Degrees of freedom.
        """
        return self._call_java("degreesOfFreedom")

    @property
    @since("2.0.0")
    def residualDegreeOfFreedom(self):
        """
        The residual degrees of freedom.
        """
        return self._call_java("residualDegreeOfFreedom")

    @property
    @since("2.0.0")
    def residualDegreeOfFreedomNull(self):
        """
        The residual degrees of freedom for the null model.
        """
        return self._call_java("residualDegreeOfFreedomNull")

    @since("2.0.0")
    def residuals(self, residualsType="deviance"):
        """
        Get the residuals of the fitted model by type.

        :param residualsType: The type of residuals which should be returned.
                              Supported options: deviance (default), pearson, working, and response.
        """
        return self._call_java("residuals", residualsType)

    @property
    @since("2.0.0")
    def nullDeviance(self):
        """
        The deviance for the null model.
        """
        return self._call_java("nullDeviance")

    @property
    @since("2.0.0")
    def deviance(self):
        """
        The deviance for the fitted model.
        """
        return self._call_java("deviance")

    @property
    @since("2.0.0")
    def dispersion(self):
        """
        The dispersion of the fitted model.
        It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise
        estimated by the residual Pearson's Chi-Squared statistic (which is defined as
        sum of the squares of the Pearson residuals) divided by the residual degrees of freedom.
        """
        return self._call_java("dispersion")

    @property
    @since("2.0.0")
    def aic(self):
        """
        Akaike's "An Information Criterion"(AIC) for the fitted model.
        """
        return self._call_java("aic")
@inherit_doc
class GeneralizedLinearRegressionTrainingSummary(GeneralizedLinearRegressionSummary):
    """
    .. note:: Experimental

    Generalized linear regression training results.

    .. versionadded:: 2.0.0
    """

    # Extends the evaluation summary with training-only diagnostics.

    @property
    @since("2.0.0")
    def numIterations(self):
        """
        Number of training iterations.
        """
        return self._call_java("numIterations")

    @property
    @since("2.0.0")
    def solver(self):
        """
        The numeric solver used for training.
        """
        return self._call_java("solver")

    @property
    @since("2.0.0")
    def coefficientStandardErrors(self):
        """
        Standard error of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("coefficientStandardErrors")

    @property
    @since("2.0.0")
    def tValues(self):
        """
        T-statistic of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("tValues")

    @property
    @since("2.0.0")
    def pValues(self):
        """
        Two-sided p-value of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("pValues")
if __name__ == "__main__":
    # Run this module's doctests against a throwaway local SparkSession.
    import doctest
    import pyspark.ml.regression
    from pyspark.sql import SparkSession
    globs = pyspark.ml.regression.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.regression tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    import tempfile
    # Doctests save/load models under temp_path; it is removed afterwards.
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        # ELLIPSIS lets doctests elide run-specific output (uids, floats).
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            # Best-effort cleanup; a leftover temp dir is harmless.
            pass
    if failure_count:
        exit(-1)
| alec-heif/MIT-Thesis | spark-bin/python/pyspark/ml/regression.py | Python | mit | 59,191 | [
"Gaussian"
] | 53208cd18c7c89cd154a19ed4470774db45a5c1f388de544e2b640945a940247 |
from vtk import *
from vtk.web.camera import *
from vtk.web.query_data_model import *
import json, os, math, gzip, shutil
# Global helper variables
# Alphabet used to build compact one-character codes for layers/fields.
encode_codes = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Index in this string == VTK data type id; value == struct-style format char.
arrayTypesMapping = ' bBhHiIlLfd'
# Map each format char to the JavaScript TypedArray the web client should use.
jsMapping = {
    'b': 'Int8Array',
    'B': 'Uint8Array',
    'h': 'Int16Array',
    'H': 'Uint16Array',  # bug fix: 'H' is UNSIGNED short -> Uint16Array (was Int16Array)
    'i': 'Int32Array',
    'I': 'Uint32Array',
    'l': 'Int32Array',
    'L': 'Uint32Array',
    'f': 'Float32Array',
    'd': 'Float64Array'
}
# -----------------------------------------------------------------------------
# Capture image from render window
# -----------------------------------------------------------------------------
class CaptureRenderWindow(object):
    """Capture the pixels of a vtkRenderWindow and write them to disk."""

    def __init__(self, magnification=1):
        self.windowToImage = vtkWindowToImageFilter()
        self.windowToImage.SetMagnification(magnification)
        self.windowToImage.SetInputBufferTypeToRGB()
        self.windowToImage.ReadFrontBufferOn()
        # No writer until SetFormat() picks one for a mime type.
        self.writer = None

    def SetRenderWindow(self, renderWindow):
        """Attach the render window whose content will be captured."""
        self.windowToImage.SetInput(renderWindow)

    def SetFormat(self, mimeType):
        """Select the image writer matching the given mime type."""
        writerClass = None
        if mimeType == 'image/png':
            writerClass = vtkPNGWriter
        elif mimeType == 'image/jpg':
            writerClass = vtkJPEGWriter
        # Unknown mime types leave the current writer untouched.
        if writerClass is not None:
            self.writer = writerClass()
            self.writer.SetInputConnection(self.windowToImage.GetOutputPort())

    def writeImage(self, path):
        """Re-capture the window and write the image to path (no-op without a writer)."""
        if self.writer is None:
            return
        self.windowToImage.Modified()
        self.windowToImage.Update()
        self.writer.SetFileName(path)
        self.writer.Write()
# -----------------------------------------------------------------------------
# Basic Dataset Builder
# -----------------------------------------------------------------------------
class DataSetBuilder(object):
    """
    Base class that drives a camera over a scene and records the resulting
    dataset description (metadata, sections, file patterns) via a DataHandler.
    """

    def __init__(self, location, camera_data, metadata=None, sections=None):
        """
        :param location: output directory for the generated dataset.
        :param camera_data: dict describing the camera ('spherical' or
                            'cylindrical') or None for no camera handling.
        :param metadata: optional dict of metadata entries to register.
        :param sections: optional dict of sections to register.
        """
        self.dataHandler = DataHandler(location)
        self.cameraDescription = camera_data
        self.camera = None
        self.imageCapture = CaptureRenderWindow()
        # Bug fix: the defaults were mutable dicts ({}), a classic shared
        # mutable-default pitfall. None is now the default; callers passing
        # dicts (including empty ones) behave exactly as before.
        for key, value in (metadata or {}).iteritems():
            self.dataHandler.addMetaData(key, value)
        for key, value in (sections or {}).iteritems():
            self.dataHandler.addSection(key, value)

    def getDataHandler(self):
        """Return the DataHandler that accumulates this dataset's description."""
        return self.dataHandler

    def getCamera(self):
        """Return the camera iterator created by start(), or None before start()."""
        return self.camera

    def updateCamera(self, camera):
        # NOTE(review): assumes start() has been called so self.renderer and
        # self.renderWindow exist — confirm call order in callers.
        update_camera(self.renderer, camera)
        self.renderWindow.Render()

    def start(self, renderWindow=None, renderer=None):
        """Bind the render window/renderer and initialize camera + metadata."""
        if renderWindow:
            # Keep track of renderWindow and renderer
            self.renderWindow = renderWindow
            self.renderer = renderer

            # Initialize image capture
            self.imageCapture.SetRenderWindow(renderWindow)

            # Handle camera if any
            if self.cameraDescription:
                if self.cameraDescription['type'] == 'spherical':
                    self.camera = create_spherical_camera(renderer, self.dataHandler, self.cameraDescription['phi'], self.cameraDescription['theta'])
                elif self.cameraDescription['type'] == 'cylindrical':
                    self.camera = create_cylindrical_camera(renderer, self.dataHandler, self.cameraDescription['phi'], self.cameraDescription['translation'])

            # Update background color
            bgColor = renderer.GetBackground()
            bgColorString = 'rgb(%d, %d, %d)' % tuple(int(bgColor[i]*255) for i in range(3))
            self.dataHandler.addMetaData('backgroundColor', bgColorString)

        # Update file patterns
        self.dataHandler.updateBasePattern()

    def stop(self):
        """Finalize the dataset by writing its JSON descriptor."""
        self.dataHandler.writeDataDescriptor()
# -----------------------------------------------------------------------------
# Image Dataset Builder
# -----------------------------------------------------------------------------
class ImageDataSetBuilder(DataSetBuilder):
    """Dataset builder that stores one rendered image per camera position."""

    def __init__(self, location, imageMimeType, cameraInfo, metadata={}, sections={}):
        DataSetBuilder.__init__(self, location, cameraInfo, metadata, sections)
        # Derive the file extension from the mime type, e.g. 'image/png' -> '.png'.
        extension = '.' + imageMimeType.split('/')[1]
        self.dataHandler.registerData(name='image', type='blob', mimeType=imageMimeType, fileName=extension)
        self.imageCapture.SetFormat(imageMimeType)

    def writeImage(self):
        """Write a single capture for the current camera position."""
        path = self.dataHandler.getDataAbsoluteFilePath('image')
        self.imageCapture.writeImage(path)

    def writeImages(self):
        """Sweep every camera position and write one capture per position."""
        for position in self.camera:
            update_camera(self.renderer, position)
            self.renderWindow.Render()
            path = self.dataHandler.getDataAbsoluteFilePath('image')
            self.imageCapture.writeImage(path)
# -----------------------------------------------------------------------------
# Volume Composite Dataset Builder
# -----------------------------------------------------------------------------
class VolumeCompositeDataSetBuilder(DataSetBuilder):
    """
    Dataset builder that writes, per camera position, one RGB image and one
    8-bit depth buffer for each (layer, color-field) combination, so a web
    client can re-composite the layers itself.

    Layers and color fields are encoded as single characters from
    encode_codes; the mapping is published in the 'CompositePipeline' section.
    """

    def __init__(self, location, imageMimeType, cameraInfo, metadata={}, sections={}):
        DataSetBuilder.__init__(self, location, cameraInfo, metadata, sections)
        self.dataHandler.addTypes('volume-composite', 'rgba+depth')
        self.imageMimeType = imageMimeType
        # e.g. 'image/png' -> '.png' (attribute name keeps its historical spelling)
        self.imageExtenstion = '.' + imageMimeType.split('/')[1]
        if imageMimeType == 'image/png':
            self.imageWriter = vtkPNGWriter()
        if imageMimeType == 'image/jpg':
            self.imageWriter = vtkJPEGWriter()
        self.imageDataColor = vtkImageData()
        self.imageWriter.SetInputData(self.imageDataColor)
        self.imageDataDepth = vtkImageData()
        # Lazily-allocated reusable byte buffer for the depth pass.
        self.depthToWrite = None
        # layerName -> {code, name, parent}
        self.layerInfo = {}
        # colorBy value -> single-character color code
        self.colorByMapping = {}
        # Description published to the client in the 'CompositePipeline' section.
        self.compositePipeline = {'layers': [], 'dimensions': [], 'fields': {}, 'layer_fields': {}, 'pipeline': []}
        # Data keys selected by the last activateLayer() call.
        self.activeDepthKey = ''
        self.activeRGBKey = ''
        # parent name -> pipeline node holding its children
        self.nodeWithChildren = {}

    def _getColorCode(self, colorBy):
        # Return the one-character code for a color field, assigning a new
        # code (next letter of encode_codes) on first use.
        if colorBy in self.colorByMapping:
            # The color code exist
            return self.colorByMapping[colorBy]
        else:
            # No color code assigned yet
            colorCode = encode_codes[len(self.colorByMapping)]
            # Assign color code
            self.colorByMapping[colorBy] = colorCode
            # Register color code with color by value
            self.compositePipeline['fields'][colorCode] = colorBy
            # Return the color code
            return colorCode

    def _getLayerCode(self, parent, layerName):
        # Return (code, isNew) for a layer, registering it in the pipeline
        # tree (under parent, if given) on first use.
        if layerName in self.layerInfo:
            # Layer already exist
            return (self.layerInfo[layerName]['code'], False)
        else:
            layerCode = encode_codes[len(self.layerInfo)]
            self.layerInfo[layerName] = { 'code': layerCode, 'name': layerName, 'parent': parent }
            self.compositePipeline['layers'].append(layerCode)
            self.compositePipeline['layer_fields'][layerCode] = []

            # Let's register it in the pipeline
            if parent:
                if parent not in self.nodeWithChildren:
                    # Need to create parent
                    rootNode = {'name': parent, 'ids': [], 'children':[]}
                    self.nodeWithChildren[parent] = rootNode
                    self.compositePipeline['pipeline'].append(rootNode)

                # Add node to its parent
                self.nodeWithChildren[parent]['children'].append({'name': layerName, 'ids': [layerCode]})
                self.nodeWithChildren[parent]['ids'].append(layerCode)
            else:
                self.compositePipeline['pipeline'].append({'name': layerName, 'ids': [layerCode]})

            return (layerCode, True)

    def _needToRegisterColor(self, layerCode, colorCode):
        # True exactly once per (layer, color) pair; records the pairing.
        if colorCode in self.compositePipeline['layer_fields'][layerCode]:
            return False
        else:
            self.compositePipeline['layer_fields'][layerCode].append(colorCode)
            return True

    def activateLayer(self, parent, name, colorBy):
        """Select the (layer, color field) that subsequent writeData() calls record."""
        layerCode, needToRegisterDepth = self._getLayerCode(parent, name)
        colorCode = self._getColorCode(colorBy)
        needToRegisterColor = self._needToRegisterColor(layerCode, colorCode)

        # Update active keys
        self.activeDepthKey = '%s_depth' % layerCode
        self.activeRGBKey = '%s%s_rgb' % (layerCode, colorCode)

        # Need to register data
        if needToRegisterDepth:
            self.dataHandler.registerData(name=self.activeDepthKey, type='array', fileName='/%s_depth.uint8' % layerCode, categories=[ layerCode ])

        if needToRegisterColor:
            self.dataHandler.registerData(name=self.activeRGBKey, type='blob', fileName='/%s%s_rgb%s' % (layerCode, colorCode, self.imageExtenstion), categories=[ '%s%s' % (layerCode, colorCode) ], mimeType=self.imageMimeType)

    def writeData(self, mapper):
        """For every camera position, write the active layer's RGB image and depth buffer."""
        width = self.renderWindow.GetSize()[0]
        height = self.renderWindow.GetSize()[1]

        if not self.depthToWrite:
            self.depthToWrite = bytearray(width * height)

        for cam in self.camera:
            self.updateCamera(cam)
            imagePath = self.dataHandler.getDataAbsoluteFilePath(self.activeRGBKey)
            depthPath = self.dataHandler.getDataAbsoluteFilePath(self.activeDepthKey)

            # -----------------------------------------------------------------
            # Write Image
            # -----------------------------------------------------------------
            mapper.GetColorImage(self.imageDataColor)
            self.imageWriter.SetFileName(imagePath)
            self.imageWriter.Write()

            # -----------------------------------------------------------------
            # Write Depth
            # -----------------------------------------------------------------
            mapper.GetDepthImage(self.imageDataDepth)
            inputArray = self.imageDataDepth.GetPointData().GetArray(0)
            size = inputArray.GetNumberOfTuples()
            # NOTE(review): assumes depth values already fit in one byte
            # (0-255) — confirm against the mapper's depth image format.
            for idx in range(size):
                self.depthToWrite[idx] = int(inputArray.GetValue(idx))

            with open(depthPath, 'wb') as f:
                f.write(self.depthToWrite)

    def start(self, renderWindow, renderer):
        DataSetBuilder.start(self, renderWindow, renderer)
        self.camera.updatePriority([2,1])

    def stop(self, compress=True):
        """Publish the CompositePipeline section, write the descriptor, and gzip depth buffers."""
        # Push metadata
        self.compositePipeline['dimensions'] = self.renderWindow.GetSize()
        self.compositePipeline['default_pipeline'] = 'A'.join(self.compositePipeline['layers']) + 'A'
        self.dataHandler.addSection('CompositePipeline', self.compositePipeline)

        # Write metadata
        DataSetBuilder.stop(self)

        if compress:
            for root, dirs, files in os.walk(self.dataHandler.getBasePath()):
                print 'Compress', root
                for name in files:
                    if '.uint8' in name and '.gz' not in name:
                        with open(os.path.join(root, name), 'rb') as f_in, gzip.open(os.path.join(root, name + '.gz'), 'wb') as f_out:
                            shutil.copyfileobj(f_in, f_out)
                        os.remove(os.path.join(root, name))
# -----------------------------------------------------------------------------
# Data Prober Dataset Builder
# -----------------------------------------------------------------------------
class DataProberDataSetBuilder(DataSetBuilder):
    """
    Dataset builder that resamples a dataset on a regular grid and writes the
    selected point-data fields as raw binary arrays, one file per field.
    """

    def __init__(self, location, sampling_dimesions, fields_to_keep, custom_probing_bounds=None, metadata={}):
        # No camera for a prober dataset, hence camera_data=None.
        DataSetBuilder.__init__(self, location, None, metadata)
        self.fieldsToWrite = fields_to_keep
        self.resamplerFilter = vtkPResampleFilter()
        self.resamplerFilter.SetSamplingDimension(sampling_dimesions)
        if custom_probing_bounds:
            self.resamplerFilter.SetUseInputBounds(0)
            self.resamplerFilter.SetCustomSamplingBounds(custom_probing_bounds)
        else:
            self.resamplerFilter.SetUseInputBounds(1)

        # Register all fields
        self.dataHandler.addTypes('data-prober', 'binary')
        # Section published to the client: per-field JS array type and ranges.
        self.DataProber = { 'types': {}, 'dimensions': sampling_dimesions, 'ranges': {}, 'spacing': [1,1,1] }
        for field in self.fieldsToWrite:
            self.dataHandler.registerData(name=field, type='array', fileName='/%s.array' % field)

    def setDataToProbe(self, dataset):
        """Probe a concrete dataset object."""
        self.resamplerFilter.SetInputData(dataset)

    def setSourceToProbe(self, source):
        """Probe the output of a VTK pipeline source."""
        self.resamplerFilter.SetInputConnection(source.GetOutputPort())

    def writeData(self):
        """Resample and dump each selected field, tracking its global value range."""
        self.resamplerFilter.Update()
        arrays = self.resamplerFilter.GetOutput().GetPointData()
        for field in self.fieldsToWrite:
            array = arrays.GetArray(field)
            if array:
                # Python 2 buffer(): zero-copy view over the VTK array memory.
                b = buffer(array)
                with open(self.dataHandler.getDataAbsoluteFilePath(field), 'wb') as f:
                    f.write(b)

                self.DataProber['types'][field] = jsMapping[arrayTypesMapping[array.GetDataType()]]
                # Widen the recorded [min, max] range across successive writes.
                if field in self.DataProber['ranges']:
                    dataRange = array.GetRange()
                    if dataRange[0] < self.DataProber['ranges'][field][0]:
                        self.DataProber['ranges'][field][0] = dataRange[0]
                    if dataRange[1] > self.DataProber['ranges'][field][1]:
                        self.DataProber['ranges'][field][1] = dataRange[1]
                else:
                    self.DataProber['ranges'][field] = [array.GetRange()[0], array.GetRange()[1]]
            else:
                print 'No array for', field
                print self.resamplerFilter.GetOutput()

    def stop(self, compress=True):
        """Publish the DataProber section, write the descriptor, and gzip the arrays."""
        # Push metadata
        self.dataHandler.addSection('DataProber', self.DataProber)

        # Write metadata
        DataSetBuilder.stop(self)

        if compress:
            for root, dirs, files in os.walk(self.dataHandler.getBasePath()):
                print 'Compress', root
                for name in files:
                    if '.array' in name and '.gz' not in name:
                        with open(os.path.join(root, name), 'rb') as f_in, gzip.open(os.path.join(root, name + '.gz'), 'wb') as f_out:
                            shutil.copyfileobj(f_in, f_out)
                        os.remove(os.path.join(root, name))
# -----------------------------------------------------------------------------
# Sorted Composite Dataset Builder
# -----------------------------------------------------------------------------
class ConvertVolumeStackToSortedStack(object):
    """
    Post-processing step: read the per-layer RGB images and depth buffers in a
    directory and rewrite them as three per-pixel depth-sorted stacks
    (order.uint8, alpha.uint8, intensity.uint8) for client-side compositing.
    """

    def __init__(self, width, height):
        # Image dimensions in pixels; must match the rendered images.
        self.width = width
        self.height = height
        # Number of layers found by the last convert() call.
        self.layers = 0

    def convert(self, directory):
        """Convert one directory of '<code>_rgb.*' / '<code>_depth.*' files in place."""
        imagePaths = {}
        depthPaths = {}
        layerNames = []
        for fileName in os.listdir(directory):
            if '_rgb' in fileName or '_depth' in fileName:
                # First char of the prefix is the one-character layer code.
                fileId = fileName.split('_')[0][0]
                if '_rgb' in fileName:
                    imagePaths[fileId] = os.path.join(directory, fileName)
                else:
                    layerNames.append(fileId)
                    depthPaths[fileId] = os.path.join(directory, fileName)

        layerNames.sort()
        if len(layerNames) == 0:
            return

        # Load data in Memory
        depthArrays = []
        imageReader = vtkPNGReader()
        numberOfValues = self.width * self.height * len(layerNames)
        imageSize = self.width * self.height
        self.layers = len(layerNames)

        # Write all images as single buffer
        # Layout: one imageSize-long slab per layer, concatenated.
        opacity = vtkUnsignedCharArray()
        opacity.SetNumberOfComponents(1)
        opacity.SetNumberOfTuples(numberOfValues)

        intensity = vtkUnsignedCharArray()
        intensity.SetNumberOfComponents(1)
        intensity.SetNumberOfTuples(numberOfValues)

        for layer in range(self.layers):
            imageReader.SetFileName(imagePaths[layerNames[layer]])
            imageReader.Update()
            rgbaArray = imageReader.GetOutput().GetPointData().GetArray(0)
            # Keep only the red channel as "intensity" and alpha as "opacity".
            for idx in range(imageSize):
                intensity.SetValue((layer * imageSize) + idx, rgbaArray.GetValue(idx*4))
                opacity.SetValue((layer * imageSize) + idx, rgbaArray.GetValue(idx*4 + 3))

            with open(depthPaths[layerNames[layer]], 'rb') as depthFile:
                depthArrays.append(depthFile.read())

        # Apply pixel sorting
        destOrder = vtkUnsignedCharArray()
        destOrder.SetNumberOfComponents(1)
        destOrder.SetNumberOfTuples(numberOfValues)

        opacityOrder = vtkUnsignedCharArray()
        opacityOrder.SetNumberOfComponents(1)
        opacityOrder.SetNumberOfTuples(numberOfValues)

        intensityOrder = vtkUnsignedCharArray()
        intensityOrder.SetNumberOfComponents(1)
        intensityOrder.SetNumberOfTuples(numberOfValues)

        # For each pixel, sort the layers front-to-back by depth and copy
        # (layer index, opacity, intensity) into the sorted output slabs.
        for pixelIdx in range(imageSize):
            depthStack = []
            for depthArray in depthArrays:
                depthStack.append((depthArray[pixelIdx], len(depthStack)))
            depthStack.sort(key=lambda tup: tup[0])

            for destLayerIdx in range(len(depthStack)):
                sourceLayerIdx = depthStack[destLayerIdx][1]
                # Copy Idx
                destOrder.SetValue((imageSize * destLayerIdx) + pixelIdx, sourceLayerIdx)
                opacityOrder.SetValue((imageSize * destLayerIdx) + pixelIdx, opacity.GetValue((imageSize * sourceLayerIdx) + pixelIdx))
                intensityOrder.SetValue((imageSize * destLayerIdx) + pixelIdx, intensity.GetValue((imageSize * sourceLayerIdx) + pixelIdx))

        # Python 2 buffer(): zero-copy dump of the VTK array memory.
        with open(os.path.join(directory, 'alpha.uint8'), 'wb') as f:
            f.write(buffer(opacityOrder))

        with open(os.path.join(directory, 'intensity.uint8'), 'wb') as f:
            f.write(buffer(intensityOrder))

        with open(os.path.join(directory, 'order.uint8'), 'wb') as f:
            f.write(buffer(destOrder))
class SortedCompositeDataSetBuilder(VolumeCompositeDataSetBuilder):
def __init__(self, location, cameraInfo, metadata={}, sections={}):
VolumeCompositeDataSetBuilder.__init__(self, location, 'image/png', cameraInfo, metadata, sections)
self.dataHandler.addTypes('sorted-composite', 'rgba')
# Register order and color textures
self.layerScalars = []
self.dataHandler.registerData(name='order', type='array', fileName='/order.uint8')
self.dataHandler.registerData(name='alpha', type='array', fileName='/alpha.uint8')
self.dataHandler.registerData(name='intensity', type='array', fileName='/intensity.uint8', categories=['intensity'])
def start(self, renderWindow, renderer):
VolumeCompositeDataSetBuilder.start(self, renderWindow, renderer)
imageSize = self.renderWindow.GetSize()
self.dataConverter = ConvertVolumeStackToSortedStack(imageSize[0], imageSize[1])
def activateLayer(self, colorBy, scalar):
VolumeCompositeDataSetBuilder.activateLayer(self, 'root', '%s' % scalar, colorBy)
self.layerScalars.append(scalar)
def writeData(self, mapper):
VolumeCompositeDataSetBuilder.writeData(self, mapper)
# Fill data pattern
self.dataHandler.getDataAbsoluteFilePath('order')
self.dataHandler.getDataAbsoluteFilePath('alpha')
self.dataHandler.getDataAbsoluteFilePath('intensity')
def stop(self, clean=True, compress=True):
VolumeCompositeDataSetBuilder.stop(self, compress=False)
# Go through all directories and convert them
for root, dirs, files in os.walk(self.dataHandler.getBasePath()):
for name in dirs:
print 'Process', os.path.join(root, name)
self.dataConverter.convert(os.path.join(root, name))
# Rename index.json to info_origin.json
os.rename(os.path.join(self.dataHandler.getBasePath(), "index.json"), os.path.join(self.dataHandler.getBasePath(), "index_origin.json"))
# Update index.json
with open(os.path.join(self.dataHandler.getBasePath(), "index_origin.json"), "r") as infoFile:
metadata = json.load(infoFile)
metadata['SortedComposite'] = {
'dimensions': metadata['CompositePipeline']['dimensions'],
'layers': self.dataConverter.layers,
'scalars': self.layerScalars[0:self.dataConverter.layers]
}
# Clean metadata
dataToKeep = []
del metadata['CompositePipeline']
for item in metadata['data']:
if item['name'] in ['order', 'alpha', 'intensity']:
dataToKeep.append(item)
metadata['data'] = dataToKeep
metadata['type'] = [ "tonic-query-data-model", "sorted-composite", "alpha" ]
# Override index.json
with open(os.path.join(self.dataHandler.getBasePath(), "index.json"), 'w') as newMetaFile:
newMetaFile.write(json.dumps(metadata))
# Clean temporary data
if clean:
for root, dirs, files in os.walk(self.dataHandler.getBasePath()):
print 'Clean', root
for name in files:
if '_rgb.png' in name or '_depth.uint8' in name or name == "index_origin.json":
os.remove(os.path.join(root, name))
if compress:
for root, dirs, files in os.walk(self.dataHandler.getBasePath()):
print 'Compress', root
for name in files:
if '.uint8' in name and '.gz' not in name:
with open(os.path.join(root, name), 'rb') as f_in, gzip.open(os.path.join(root, name + '.gz'), 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(os.path.join(root, name))
| SimVascular/VTK | Web/Python/vtk/web/dataset_builder.py | Python | bsd-3-clause | 22,027 | [
"VTK"
] | 38f72ae8d4e5fe9bc1b6af46ebffea46aa89360a8eeae54fb9fda5ca3c6fbac5 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Access information (reads, writes) resolution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.py2tf.pyct import anno
# TODO(mdan): Add support for PY3 (e.g. Param vs arg).
class Scope(object):
    """Tracks symbols defined, written and read within a single scope.

    Scopes form a chain through ``parent``. A non-isolated scope forwards
    symbol creation to its parent (e.g. a loop body sharing the enclosing
    function's namespace). Note that these scopes do not necessarily align
    with Python's own scoping rules.

    Attributes:
        modified: identifiers modified in this scope
        created: identifiers created in this scope
        used: identifiers referenced in this scope
    """

    def __init__(self, parent, isolated=True):
        """Create a new scope.

        Args:
            parent: A Scope or None.
            isolated: Whether the scope is isolated, that is, whether
                variables created in this scope should be visible to the
                parent scope.
        """
        self.isolated = isolated
        self.parent = parent
        self.modified = set()
        self.created = set()
        self.used = set()

    def __repr__(self):
        parts = (tuple(self.used), tuple(self.created), tuple(self.modified))
        return 'Scope{r=%s, c=%s, w=%s}' % parts

    def has(self, name):
        """Return True if name was modified in this or any ancestor scope."""
        scope = self
        while scope is not None:
            if name in scope.modified:
                return True
            scope = scope.parent
        return False

    def mark_read(self, name):
        """Record a read; reads of inherited symbols propagate upward."""
        self.used.add(name)
        parent = self.parent
        if parent is not None and name not in self.created and parent.has(name):
            parent.mark_read(name)

    def mark_write(self, name):
        """Record a write, creating the symbol here or in the parent."""
        self.modified.add(name)
        if self.parent is not None and self.parent.has(name):
            # The symbol lives in an ancestor scope: the write belongs there.
            self.parent.mark_write(name)
            return
        if not self.isolated:
            # Non-isolated scopes surface new symbols to their parent too.
            self.parent.mark_write(name)
        self.created.add(name)
class AccessResolver(gast.NodeTransformer):
  """Annotates nodes with local scope information. See Scope."""

  def __init__(self):
    # Root scope; visitor methods push/pop child Scopes around constructs
    # that introduce their own namespace view.
    self.scope = Scope(None)

  def visit_Name(self, node):
    # TODO(mdan): This is insufficient for object fields, e.g. hp.learning_rate.
    self.generic_visit(node)
    if isinstance(node.ctx, gast.Store):
      self.scope.mark_write(node.id)
    elif isinstance(node.ctx, gast.Load):
      # Record whether the symbol is known locally *before* registering the
      # read, so the annotation reflects the pre-read state.
      anno.setanno(node, 'is_local', self.scope.has(node.id))
      self.scope.mark_read(node.id)
    elif isinstance(node.ctx, gast.Param):
      # Param contexts appear in function defs, so they have the meaning of
      # defining a variable.
      # TODO(mdan): This may be incorrect with nested functions.
      # For nested functions, we'll have to add the notion of hiding args from
      # the parent scope, not writing to them.
      self.scope.mark_write(node.id)
    else:
      raise ValueError('Unknown context %s for node %s.' % (type(node.ctx),
                                                            node.id))
    return node

  def visit_Print(self, node):
    # Visit the print arguments inside a dedicated child scope and attach
    # it to the node as the 'args_scope' annotation.
    current_scope = self.scope
    args_scope = Scope(current_scope)
    self.scope = args_scope
    for n in node.values:
      self.visit(n)
    anno.setanno(node, 'args_scope', args_scope)
    self.scope = current_scope
    return node

  def visit_Call(self, node):
    # Same treatment as visit_Print: arguments are resolved in a child
    # scope annotated on the call node.
    current_scope = self.scope
    args_scope = Scope(current_scope)
    self.scope = args_scope
    for n in node.args:
      self.visit(n)
    # TODO(mdan): Account starargs, kwargs
    for n in node.keywords:
      self.visit(n)
    anno.setanno(node, 'args_scope', args_scope)
    self.scope = current_scope
    # The callee expression itself is resolved in the enclosing scope.
    self.visit(node.func)
    return node

  def visit_For(self, node):
    # For loops are not supported by this analysis yet.
    raise NotImplementedError()

  def visit_While(self, node):
    # The loop test is evaluated in the enclosing scope; the body gets a
    # non-isolated child scope so writes in it propagate to the parent.
    self.visit(node.test)
    current_scope = self.scope
    anno.setanno(node, 'parent_scope', current_scope)
    body_scope = Scope(current_scope, isolated=False)
    self.scope = body_scope
    for n in node.body:
      self.visit(n)
    anno.setanno(node, 'body_scope', body_scope)
    if node.orelse:
      raise NotImplementedError()
      # TODO(mdan): Add support for orelse.
    self.scope = current_scope
    return node
def resolve(node):
  """Annotate an AST with read/write scope information (see Scope)."""
  resolver = AccessResolver()
  return resolver.visit(node)
| jwlawson/tensorflow | tensorflow/contrib/py2tf/pyct/static_analysis/access.py | Python | apache-2.0 | 4,911 | [
"VisIt"
] | 03f68cef397dcef619e7748d264428115b0960ff4cc90d7af4ec5fcb313234d0 |
import os
import sys
from subprocess import Popen, PIPE, check_call, check_output
import pprint
import re
# This script is intended to help copy dynamic libraries used by FreeCAD into
# a Mac application bundle and change dyld commands as appropriate. There are
# two key items that this currently does differently from other similar tools:
#
# * @rpath is used rather than @executable_path because the libraries need to
# be loadable through a Python interpreter and the FreeCAD binaries.
# * We need to be able to add multiple rpaths in some libraries.
# Assume any libraries in these paths don't need to be bundled
# Assume any libraries in these paths don't need to be bundled
# (macOS system locations plus the 3Dconnexion driver framework).
systemPaths = [ "/System/", "/usr/lib/",
                "/Library/Frameworks/3DconnexionClient.framework/" ]

# If a library is in these paths, but not systemPaths, a warning will be
# issued and it will NOT be bundled. Generally, libraries installed by
# MacPorts or Homebrew won't end up in /Library/Frameworks, so we assume
# that libraries found there aren't meant to be bundled.
warnPaths = ["/Library/Frameworks/"]
class LibraryNotFound(Exception):
    """Raised when a dependent library cannot be located in the search paths."""
class Node:
    """One Mach-O library in the dependency graph.

    Equality and hashing are by ``name`` only, so two nodes naming the same
    library compare equal even if found at different paths. ``path`` should
    be an absolute path to the directory containing ``name``; ``children``
    holds the names of the libraries this one depends on.
    """

    def __init__(self, name, path="", children=None):
        self.name = name
        self.path = path
        # `children=None` sentinel avoids the shared-mutable-default trap.
        if not children:
            children = list()
        self.children = children
        # Scratch flag used by graph traversals (see DepsGraph.visit).
        self._marked = False

    def __eq__(self, other):
        # Bug fix: returning NotImplemented (instead of False) for foreign
        # types lets Python try the reflected comparison; the observable
        # result for all existing comparisons is unchanged.
        if not isinstance(other, Node):
            return NotImplemented
        return self.name == other.name

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        return hash(self.name)

    def __repr__(self):
        # Aids debugging of graph traversals; no caller depends on repr.
        return "Node(name=%r, path=%r)" % (self.name, self.path)
class DepsGraph:
    """Directed graph of library dependency nodes, keyed by node name."""

    def __init__(self):
        # Bug fix: `graph` used to be a *class* attribute, so every
        # DepsGraph instance silently shared one dict. Make it per-instance.
        self.graph = {}

    def in_graph(self, node):
        return node.name in self.graph

    def add_node(self, node):
        self.graph[node.name] = node

    def get_node(self, name):
        # dict.has_key() is Python-2-only; `in` is equivalent on both 2 and 3.
        if name in self.graph:
            return self.graph[name]
        return None

    def visit(self, operation, op_args=None):
        """Perform a depth-first visit of the graph, calling
        operation(self, node, *op_args) once per node.
        """
        # `op_args=[]` mutable default replaced by a None sentinel.
        if op_args is None:
            op_args = []
        stack = []
        for k in self.graph.keys():
            self.graph[k]._marked = False
        for k in self.graph.keys():
            if not self.graph[k]._marked:
                stack.append(k)
        while stack:
            node_key = stack.pop()
            if self.graph[node_key]._marked:
                # Bug fix: every node is seeded in the initial stack, and a
                # node can be pushed again as another node's child; without
                # this guard `operation` could run twice on the same node.
                continue
            self.graph[node_key]._marked = True
            for ck in self.graph[node_key].children:
                if not self.graph[ck]._marked:
                    stack.append(ck)
            operation(self, self.graph[node_key], *op_args)
def is_macho(path):
    """Return True when the `file` utility reports *path* as a Mach-O binary."""
    description = check_output(["file", path])
    # Equivalent to the original `count(...) != 0` check.
    return description.count("Mach-O") != 0
def is_system_lib(lib):
    """Return True when *lib* lives in a location we never bundle.

    Libraries under ``warnPaths`` (but not ``systemPaths``) are also treated
    as system libraries, with a warning, on the assumption that
    /Library/Frameworks holds system-wide frameworks (see the module-level
    comment on warnPaths).
    """
    # str.startswith accepts a tuple of prefixes: one C-level call instead
    # of a Python loop.
    if lib.startswith(tuple(systemPaths)):
        return True
    for p in warnPaths:
        if lib.startswith(p):
            # Single-argument print() behaves identically under Python 2
            # and 3, easing an eventual migration of this Python 2 script.
            print("WARNING: library %s will not be bundled!" % lib)
            print("See MakeMacRelocatable.py for more information.")
            return True
    return False
def get_path(name, search_paths):
    """Return the first directory in *search_paths* that contains a file
    called *name*, or None when no directory does."""
    return next(
        (directory for directory in search_paths
         if os.path.isfile(os.path.join(directory, name))),
        None)
def list_install_names(path_macho):
    """Return the non-system libraries referenced by a Mach-O file.

    Parses `otool -L` output. NOTE(review): the parsing assumes otool
    indents each dependency with a tab; the first tab-delimited field is
    the queried file name itself, and when the file is a library its own
    install name appears next and must be skipped -- verify against the
    otool version in use.
    """
    output = check_output(["otool", "-L", path_macho])
    lines = output.split("\t")
    libs = []
    # first line is the filename, and if it is a library, the second line
    # is the install name of it
    if path_macho.endswith(os.path.basename(lines[1].split(" (")[0])):
        lines = lines[2:]
    else:
        lines = lines[1:]
    for line in lines:
        # strip the trailing "(compatibility version ...)" annotation
        lib = line.split(" (")[0]
        if not is_system_lib(lib):
            libs.append(lib)
    return libs
def library_paths(install_names, search_paths):
    """Resolve each install name to an absolute library path.

    Install names that are relative or @-prefixed (@rpath, @loader_path,
    @executable_path) are located by searching *search_paths*.

    Raises:
        LibraryNotFound: if a relative/@-prefixed install name cannot be
            located. (Bug fix: previously a failed lookup returned None and
            the subsequent os.path.join(None, ...) raised a confusing
            TypeError; now we raise the same exception create_dep_nodes
            uses.)
    """
    paths = []
    for name in install_names:
        path = os.path.dirname(name)
        lib_name = os.path.basename(name)
        if path == "" or name[0] == "@":
            # not absolute -- we need to find the path of this lib
            path = get_path(lib_name, search_paths)
            if path is None:
                raise LibraryNotFound(lib_name + " not found in given paths")
        paths.append(os.path.join(path, lib_name))
    return paths
def create_dep_nodes(install_names, search_paths):
    """
    Return a list of Node objects from the provided install names.

    The search paths take precedence over an absolute install path so the
    caller controls which copy of a library is used.

    Raises:
        LibraryNotFound: when a library cannot be located at all.
    """
    nodes = []
    for lib in install_names:
        install_path = os.path.dirname(lib)
        lib_name = os.path.basename(lib)

        # even if install_path is absolute, see if library can be found by
        # searching search_paths, so that we have control over what library
        # location to use
        path = get_path(lib_name, search_paths)

        if install_path != "" and lib[0] != "@":
            # we have an absolute path install name
            if not path:
                path = install_path

        if not path:
            # Bug fix: the original message was missing the space before
            # "not", producing e.g. "libfoo.dylibnot found in given paths".
            raise LibraryNotFound(lib_name + " not found in given paths")

        nodes.append(Node(lib_name, path))
    return nodes
def paths_at_depth(prefix, paths, depth):
    """Return the entries of *paths* whose path joined with *prefix* has
    exactly *depth* components (leading/trailing slashes ignored)."""
    return [p for p in paths
            if len(os.path.join(prefix, p).strip('/').split('/')) == depth]
def should_visit(prefix, path_filters, path):
    """Decide whether the bundle walk should descend into *path*.

    A directory is visited unless some filter (prefix-joined entry of
    *path_filters*) shares its parent directory and none of those
    same-parent filters matches it component-by-component.
    """
    s_path = path.strip('/').split('/')
    filters = []
    # we only want to use filters if they have the same parent as path
    for rel_pf in path_filters:
        pf = os.path.join(prefix, rel_pf)
        if os.path.split(pf)[0] == os.path.split(path)[0]:
            filters.append(pf)
    if not filters:
        # no filter that applies to this path
        return True
    for pf in filters:
        s_filter = pf.strip('/').split('/')
        length = len(s_filter)
        matched = 0
        # NOTE(review): indexes s_filter with s_path's indices; if *path*
        # ever had more components than a same-parent filter this would
        # raise IndexError. It appears unreachable because filters share
        # path's parent (equal depth), but confirm before changing the
        # filter scheme.
        for i in range(len(s_path)):
            if s_path[i] == s_filter[i]:
                matched += 1
        # full filter match, or path fully matched as a prefix of the filter
        if matched == length or matched == len(s_path):
            return True
    return False
def build_deps_graph(graph, bundle_path, dirs_filter=None, search_paths=[]):
    """
    Walk bundle_path and build a graph of the encountered Mach-O binaries
    and their dependencies.

    NOTE(review): `search_paths=[]` is a mutable default; it is copied
    immediately below so it is harmless here, but callers should not rely
    on it accumulating state.
    """
    # make a local copy since we add to it
    s_paths = list(search_paths)
    visited = {}

    for root, dirs, files in os.walk(bundle_path):
        if dirs_filter != None:
            # prune the walk in place to the whitelisted sub-directories
            dirs[:] = [d for d in dirs if should_visit(bundle_path, dirs_filter,
                                                       os.path.join(root, d))]
        # every directory we walk is also a candidate dependency location
        s_paths.insert(0, root)
        for f in files:
            fpath = os.path.join(root, f)
            ext = os.path.splitext(f)[1]
            # consider extension-less Mach-O executables plus .so/.dylib
            if ( (ext == "" and is_macho(fpath)) or
                 ext == ".so" or ext == ".dylib" ):
                visited[fpath] = False

    # iterative depth-first traversal over the discovered binaries
    stack = []
    for k in visited.keys():
        if not visited[k]:
            stack.append(k)
    while stack:
        k2 = stack.pop()
        visited[k2] = True
        node = Node(os.path.basename(k2), os.path.dirname(k2))
        # NOTE(review): when a node with this name is already in the graph,
        # `node` here is a *fresh* object, and the children appended below
        # attach to it rather than to the graph's node -- confirm this is
        # intended for libraries reachable from several locations.
        if not graph.in_graph(node):
            graph.add_node(node)
        deps = create_dep_nodes(list_install_names(k2), s_paths)
        for d in deps:
            if d.name not in node.children:
                node.children.append(d.name)
            dk = os.path.join(d.path, d.name)
            if dk not in visited.keys():
                visited[dk] = False
            if not visited[dk]:
                stack.append(dk)
def in_bundle(lib, bundle_path):
    """Return True when *lib*'s path lies inside the bundle."""
    return lib.startswith(bundle_path)
def copy_into_bundle(graph, node, bundle_path):
    """Copy node's library into <bundle>/lib if it is not already inside,
    update node.path accordingly, and make the copy writable so later
    install_name_tool edits succeed. Used as a DepsGraph.visit operation."""
    if in_bundle(node.path, bundle_path):
        return
    source = os.path.join(node.path, node.name)
    destination = os.path.join(bundle_path, "lib", node.name)
    # -L dereferences symlinks so the bundle gets a real file.
    check_call(["cp", "-L", source, destination])
    node.path = os.path.join(bundle_path, "lib")
    # fix permissions
    check_call(["chmod", "a+w", destination])
def get_rpaths(library):
    "Returns a list of rpaths specified within library"
    out = check_output(["otool", "-l", library])
    # In `otool -l` output an LC_RPATH load command is followed a couple of
    # lines later by a "path <rpath> (offset N)" line; scan with a flag.
    pathRegex = r"^path (.*) \(offset \d+\)$"
    expectingRpath = False
    rpaths = []
    for line in out.split('\n'):
        line = line.strip()
        if "cmd LC_RPATH" in line:
            expectingRpath = True
        elif "Load command" in line:
            # a new load-command header resets the scan state
            expectingRpath = False
        elif expectingRpath:
            m = re.match(pathRegex, line)
            if m:
                rpaths.append(m.group(1))
                # only one path entry per LC_RPATH command
                expectingRpath = False
    return rpaths
def add_rpaths(graph, node, bundle_path):
    """Rewrite *node*'s install names to @rpath form and add the
    @loader_path-relative rpath entries needed to reach each dependency.

    Only libraries already inside the bundle are modified. Intended to be
    used as a DepsGraph.visit() operation after copy_into_bundle.
    """
    if node.children:
        lib = os.path.join(node.path, node.name)
        if in_bundle(lib, bundle_path):
            install_names = list_install_names(lib)
            rpaths = []

            for install_name in install_names:
                name = os.path.basename(install_name)
                # change install names to use rpaths
                check_call([ "install_name_tool", "-change",
                             install_name, "@rpath/" + name, lib ])
                # locate the dependency's node to compute a relative rpath
                dep_node = node.children[node.children.index(name)]
                rel_path = os.path.relpath(graph.get_node(dep_node).path,
                                           node.path)
                rpath = ""
                if rel_path == ".":
                    rpath = "@loader_path/"
                else:
                    rpath = "@loader_path/" + rel_path + "/"
                if rpath not in rpaths:
                    rpaths.append(rpath)

            for rpath in rpaths:
                # Ensure that lib has rpath set; skipped when already present
                # (presumably because install_name_tool rejects duplicate
                # rpaths -- verify against the tool's behavior).
                if not rpath in get_rpaths(lib):
                    check_output([ "install_name_tool",
                                   "-add_rpath", rpath, lib ])
def main():
    """Entry point: make the FreeCAD bundle at sys.argv[1] relocatable by
    copying dependencies into <bundle>/Contents/lib and rewriting their
    install names to @rpath form."""
    if len(sys.argv) < 2:
        # Single-argument print() is identical under Python 2 and 3;
        # sys.exit() replaces quit(), which is an interactive helper
        # injected by the site module and discouraged in scripts.
        print("Usage " + sys.argv[0] + " path [additional search paths]")
        sys.exit()

    path = sys.argv[1]
    bundle_path = os.path.abspath(os.path.join(path, "Contents"))
    graph = DepsGraph()
    # only descend into these bundle sub-directories
    dir_filter = ["bin", "lib", "Mod", "Mod/PartDesign",
                  "lib/python2.7/site-packages",
                  "lib/python2.7/lib-dynload"]
    search_paths = [bundle_path + "/lib"] + sys.argv[2:]

    build_deps_graph(graph, bundle_path, dir_filter, search_paths)
    graph.visit(copy_into_bundle, [bundle_path])
    graph.visit(add_rpaths, [bundle_path])


if __name__ == "__main__":
    main()
| timthelion/FreeCAD | src/Tools/MakeMacBundleRelocatable.py | Python | lgpl-2.1 | 11,092 | [
"VisIt"
] | 8dddc43e74c95e8dc542ea8b671658571e9e51f3f430a073073d91bb98b216dc |
from ..simstore.custom_json import JSONCodec
try:
import mdtraj as md
except ImportError:
md = None
HAS_MDTRAJ = False
else:
HAS_MDTRAJ = True
import pandas as pd
def _check_mdtraj():
    """Raise RuntimeError when the optional mdtraj dependency is missing."""
    if HAS_MDTRAJ:
        return
    raise RuntimeError("Unable to import MDTraj.")
def traj_to_dict(obj):
    """Serialize an mdtraj.Trajectory into a plain dict of its fields."""
    keys = ('xyz', 'topology', 'time', 'unitcell_lengths', 'unitcell_angles')
    return {key: getattr(obj, key) for key in keys}
def traj_from_dict(dct):
    """Rebuild an mdtraj.Trajectory from a dict, dropping codec metadata."""
    _check_mdtraj()
    kwargs = dict(dct)
    # strip the class markers added by the JSON codec machinery
    kwargs.pop('__class__', None)
    kwargs.pop('__module__', None)
    return md.Trajectory(**kwargs)
def topology_to_dict(obj):
    """Serialize an mdtraj.Topology via its dataframe representation."""
    atoms_df, bond_array = obj.to_dataframe()
    return {'atoms': atoms_df.to_json(),
            'bonds': bond_array}
def topology_from_dict(dct):
    """Rebuild an mdtraj.Topology from its serialized dataframe form."""
    _check_mdtraj()
    atoms = pd.read_json(dct['atoms'])
    return md.Topology.from_dataframe(atoms=atoms, bonds=dct['bonds'])
# Register JSON codecs for mdtraj types only when mdtraj is importable;
# otherwise expose an empty codec list so callers can extend their codec
# set unconditionally.
if HAS_MDTRAJ:
    traj_codec = JSONCodec(md.Trajectory, traj_to_dict, traj_from_dict)
    top_codec = JSONCodec(md.Topology, topology_to_dict, topology_from_dict)
    mdtraj_codecs = [traj_codec, top_codec]
else:
    mdtraj_codecs = []
| choderalab/openpathsampling | openpathsampling/experimental/storage/mdtraj_json.py | Python | lgpl-2.1 | 1,220 | [
"MDTraj"
] | 9d9309c98bc1510077b64b2e03e334358a5128d17e0909793b7d0bee1f0b0ffd |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibauthorid Web Interface Logic and URL handler. """
# pylint: disable=W0105
# pylint: disable=C0301
# pylint: disable=W0613
from cgi import escape
from pprint import pformat
from operator import itemgetter
import re
try:
from invenio.utils.json import json, CFG_JSON_AVAILABLE
except:
CFG_JSON_AVAILABLE = False
json = None
from invenio.legacy.bibauthorid.webapi import add_cname_to_hepname_record
from invenio.config import CFG_SITE_URL, CFG_BASE_URL
from invenio.legacy.bibauthorid.config import AID_ENABLED, PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT, \
BIBAUTHORID_UI_SKIP_ARXIV_STUB_PAGE, VALID_EXPORT_FILTERS, PERSONS_PER_PAGE, \
MAX_NUM_SHOW_PAPERS
from invenio.config import CFG_SITE_LANG, CFG_SITE_NAME, CFG_INSPIRE_SITE, CFG_SITE_SECURE_URL
from invenio.legacy.bibauthorid.name_utils import most_relevant_name
from invenio.legacy.webpage import page
from invenio.base.i18n import gettext_set_language #, wash_language
from invenio.legacy.template import load
from invenio.ext.legacy.handler import wash_urlargd, WebInterfaceDirectory
from invenio.utils.url import redirect_to_url
from invenio.legacy.webuser import (
getUid,
page_not_authorized,
collect_user_info,
set_user_preferences,
get_user_preferences,
email_valid_p,
emailUnique,
get_email_from_username,
get_uid_from_email,
isGuestUser)
from invenio.modules.access.control import acc_find_user_role_actions
from invenio.legacy.search_engine import perform_request_search
from invenio.legacy.search_engine.utils import get_fieldvalues
import invenio.legacy.bibauthorid.webapi as webapi
from flask import session
from invenio.legacy.bibauthorid.config import CREATE_NEW_PERSON
import invenio.utils.apache as apache
import invenio.legacy.webauthorprofile.interface as webauthorapi
from invenio.legacy.bibauthorid.general_utils import is_valid_orcid
from invenio.legacy.bibauthorid.backinterface import update_external_ids_of_authors, get_orcid_id_of_author, \
get_validated_request_tickets_for_author, get_title_of_paper, get_claimed_papers_of_author
from invenio.legacy.bibauthorid.dbinterface import defaultdict, remove_arxiv_papers_of_author
from invenio.legacy.webauthorprofile.orcidutils import get_dois_from_orcid
from invenio.legacy.bibauthorid.webauthorprofileinterface import is_valid_canonical_id, get_person_id_from_canonical_id, \
get_person_redirect_link, author_has_papers
from invenio.legacy.bibauthorid.templates import WebProfileMenu, WebProfilePage
# Imports related to hepnames update form
from invenio.legacy.bibedit.utils import get_bibrecord
from invenio.legacy.bibrecord import record_get_field_value, record_get_field_values, \
record_get_field_instances, field_get_subfield_values
TEMPLATE = load('bibauthorid')
class WebInterfaceBibAuthorIDClaimPages(WebInterfaceDirectory):
'''
Handles /author/claim pages and AJAX requests.
Supplies the methods:
/author/claim/<string>
/author/claim/action
/author/claim/claimstub
/author/claim/export
/author/claim/generate_autoclaim_data
/author/claim/merge_profiles_ajax
/author/claim/search_box_ajax
/author/claim/tickets_admin
/author/claim/search
'''
_exports = ['',
'action',
'claimstub',
'export',
'generate_autoclaim_data',
'merge_profiles_ajax',
'search_box_ajax',
'tickets_admin'
]
def _lookup(self, component, path):
    '''
    This handler parses dynamic URLs:
    - /author/profile/1332 shows the page of author with id: 1332
    - /author/profile/100:5522,1431 shows the page of the author
    identified by the bibrefrec: '100:5522,1431'

    Unknown components are delegated to a new instance constructed with
    the component as the author identifier.
    '''
    # Idiomatic membership test: "x not in y" instead of "not x in y".
    if component not in self._exports:
        return WebInterfaceBibAuthorIDClaimPages(component), path
def _is_profile_owner(self, pid):
    """Return True when *pid* denotes the author profile this page serves."""
    return int(pid) == self.person_id
def _is_admin(self, pinfo):
    """Return True when the session info marks the user as an administrator."""
    admin_level = 'admin'
    return pinfo['ulevel'] == admin_level
def __init__(self, identifier=None):
    '''
    Constructor of the web interface.

    Resolves *identifier* to an internal author id by trying, in order:
    a canonical id, a numeric author id (only when that author has
    papers), and a bibrefrec. On failure person_id stays -1.

    @param identifier: identifier of an author. Can be one of:
        - an author id: e.g. "14"
        - a canonical id: e.g. "J.R.Ellis.1"
        - a bibrefrec: e.g. "100:1442,155"
    @type identifier: str
    '''
    self.person_id = -1  # -1 is a non valid author identifier

    if identifier is None or not isinstance(identifier, str):
        return

    # check if it's a canonical id: e.g. "J.R.Ellis.1"
    pid = int(webapi.get_person_id_from_canonical_id(identifier))
    if pid >= 0:
        self.person_id = pid
        return

    # check if it's an author id: e.g. "14"
    try:
        pid = int(identifier)
        if webapi.author_has_papers(pid):
            self.person_id = pid
            return
    except ValueError:
        pass

    # check if it's a bibrefrec: e.g. "100:1442,155"
    if webapi.is_valid_bibref(identifier):
        pid = int(webapi.get_person_id_from_paper(identifier))
        if pid >= 0:
            self.person_id = pid
            return
def __call__(self, req, form):
    '''
    Serve the main person page.
    Will use the object's person id to get a person's information.

    @param req: apache request object
    @type req: apache request object
    @param form: POST/GET variables of the request
    @type form: dict

    @return: a full page formatted in HTML
    @rtype: str
    '''
    webapi.session_bareinit(req)
    pinfo = session['personinfo']
    ulevel = pinfo['ulevel']

    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
                               'open_claim': (str, None),
                               'ticketid': (int, -1),
                               'verbose': (int, 0)})
    debug = "verbose" in argd and argd["verbose"] > 0
    ln = argd['ln']
    req.argd = argd  # needed for perform_req_search

    # -1 means no valid author could be resolved from the URL identifier
    if self.person_id < 0:
        return redirect_to_url(req, '%s/author/search' % (CFG_SITE_URL))

    no_access = self._page_access_permission_wall(req, [self.person_id])
    if no_access:
        return no_access

    pinfo['claim_in_process'] = True

    user_info = collect_user_info(req)
    user_info['precached_viewclaimlink'] = pinfo['claim_in_process']
    session.dirty = True

    if self.person_id != -1:
        pinfo['claimpaper_admin_last_viewed_pid'] = self.person_id

    rt_ticket_id = argd['ticketid']
    if rt_ticket_id != -1:
        pinfo["admin_requested_ticket_id"] = rt_ticket_id
    session.dirty = True

    ## Create menu and page using templates
    cname = webapi.get_canonical_id_from_person_id(self.person_id)
    menu = WebProfileMenu(str(cname), "claim", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))
    profile_page = WebProfilePage("claim", webapi.get_longest_name_from_pid(self.person_id))
    profile_page.add_profile_menu(menu)

    # values consumed by the client-side ticketbox application
    gboxstatus = self.person_id
    gpid = self.person_id
    gNumOfWorkers = 3  # to do: read it from conf file
    gReqTimeout = 3000
    gPageTimeout = 12000

    profile_page.add_bootstrapped_data(json.dumps({
        "other": "var gBOX_STATUS = '%s';var gPID = '%s'; var gNumOfWorkers= '%s'; var gReqTimeout= '%s'; var gPageTimeout= '%s';" % (gboxstatus, gpid, gNumOfWorkers, gReqTimeout, gPageTimeout),
        "backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s", guestPrompt: true});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
    }))
    if debug:
        profile_page.add_debug_info(pinfo)

    # content += self._generate_person_info_box(ulevel, ln) #### Name variants
    # metaheaderadd = self._scripts() + '\n <meta name="robots" content="nofollow" />'
    # body = self._generate_optional_menu(ulevel, req, form)

    content = self._generate_tabs(ulevel, req)
    content += self._generate_footer(ulevel)
    content = content.decode('utf-8', 'strict')

    webapi.history_log_visit(req, 'claim', pid=self.person_id)
    return page(title=self._generate_title(ulevel),
                metaheaderadd=profile_page.get_head().encode('utf-8'),
                body=profile_page.get_wrapped_body(content).encode('utf-8'),
                req=req,
                language=ln,
                show_title_p=False)
def _page_access_permission_wall(self, req, req_pid=None, req_level=None):
    '''
    Display an error page if user not authorized to use the interface.

    @param req: Apache Request Object for session management
    @type req: Apache Request Object
    @param req_pid: Requested person id
    @type req_pid: int
    @param req_level: Request level required for the page
    @type req_level: string

    @return: empty string when authorized, otherwise the not-authorized page.
    '''
    uid = getUid(req)
    pinfo = session["personinfo"]
    uinfo = collect_user_info(req)

    if 'ln' in pinfo:
        ln = pinfo["ln"]
    else:
        ln = CFG_SITE_LANG
    _ = gettext_set_language(ln)

    is_authorized = True
    pids_to_check = []

    if not AID_ENABLED:
        return page_not_authorized(req, text=_("Fatal: Author ID capabilities are disabled on this system."))

    if req_level and 'ulevel' in pinfo and pinfo["ulevel"] != req_level:
        return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))

    # normalize req_pid into a list of person ids to verify
    if req_pid and not isinstance(req_pid, list):
        pids_to_check = [req_pid]
    elif req_pid and isinstance(req_pid, list):
        pids_to_check = req_pid

    # non-admin users without any claim/attribution rights are rejected
    if (not (uinfo['precached_usepaperclaim']
             or uinfo['precached_usepaperattribution'])
            and 'ulevel' in pinfo
            and not pinfo["ulevel"] == "admin"):
        is_authorized = False

    if is_authorized and not webapi.user_can_view_CMP(uid):
        is_authorized = False

    # pending session tickets also have their pids checked below
    if is_authorized and 'ticket' in pinfo:
        for tic in pinfo["ticket"]:
            if 'pid' in tic:
                pids_to_check.append(tic['pid'])

    if pids_to_check and is_authorized:
        user_pid = webapi.get_pid_from_uid(uid)

        if not uinfo['precached_usepaperattribution']:
            if (not user_pid in pids_to_check
                    and 'ulevel' in pinfo
                    and not pinfo["ulevel"] == "admin"):
                is_authorized = False

            # NOTE(review): the elif below was reconstructed as attached to
            # the inner `if` (the conditions are exact complements); verify
            # against upstream before relying on this nesting.
            elif (user_pid in pids_to_check
                  and 'ulevel' in pinfo
                  and not pinfo["ulevel"] == "admin"):
                # drop ticket entries that do not belong to this user
                for tic in list(pinfo["ticket"]):
                    if not tic["pid"] == user_pid:
                        pinfo['ticket'].remove(tic)

    if not is_authorized:
        return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
    else:
        return ""
def _generate_title(self, ulevel):
    '''
    Generates the title for the specified user permission level.

    @param ulevel: user permission level ('guest', 'user' or 'admin')
    @type ulevel: str

    @return: title
    @rtype: str
    '''
    # Per-level interface tag; unknown levels raise KeyError, matching the
    # dispatch-dict behavior this replaces.
    interface_tag = {'guest': '',
                     'user': ' (user interface)',
                     'admin': ' (administrator interface)'}[ulevel]
    if self.person_id:
        return ('Assign papers%s for: ' % interface_tag
                + str(webapi.get_person_redirect_link(self.person_id)))
    return 'Assign papers'
def _generate_optional_menu(self, ulevel, req, form):
    '''
    Generates the menu for the specified user permission level.

    @param ulevel: user permission level ('guest', 'user' or 'admin')
    @type ulevel: str
    @param req: apache request object
    @type req: apache request object
    @param form: POST/GET variables of the request
    @type form: dict

    @return: menu wrapped in a clearfix div
    @rtype: str
    '''
    # guest and user share the plain menu template; only admin differs.
    # Dict lookup keeps the KeyError behavior for unknown levels.
    menu_template = {'guest': TEMPLATE.tmpl_person_menu,
                     'user': TEMPLATE.tmpl_person_menu,
                     'admin': TEMPLATE.tmpl_person_menu_admin}[ulevel]

    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
                               'verbose': (int, 0)})
    menu = menu_template(self.person_id, argd['ln'])

    # verbose mode appends a dump of the session's person info
    if "verbose" in argd and argd["verbose"] > 0:
        pinfo = session['personinfo']
        menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"

    return "<div class=\"clearfix\">" + menu + "</div>"
def _generate_ticket_box(self, ulevel, req):
    '''
    Generates the semi-permanent info box for the specified user permission
    level.

    @param ulevel: user permission level
    @type ulevel: str
    @param req: apache request object
    @type req: apache request object

    @return: info box
    @rtype: str
    '''
    def generate_ticket_box_guest(req):
        pinfo = session['personinfo']
        ticket = pinfo['ticket']
        results = list()
        pendingt = list()
        # split the session's transactions into executed and pending ones
        for t in ticket:
            if 'execution_result' in t:
                for res in t['execution_result']:
                    results.append(res)
            else:
                pendingt.append(t)

        box = ""
        if pendingt:
            box += TEMPLATE.tmpl_ticket_box('in_process', 'transaction', len(pendingt))

        if results:
            # each result is a (status, messages) pair
            failed = [messages for status, messages in results if not status]
            if failed:
                box += TEMPLATE.tmpl_transaction_box('failure', failed)

            successfull = [messages for status, messages in results if status]
            if successfull:
                box += TEMPLATE.tmpl_transaction_box('success', successfull)

        return box

    def generate_ticket_box_user(req):
        # users see the same box as guests
        return generate_ticket_box_guest(req)

    def generate_ticket_box_admin(req):
        # admins see the same box as guests
        return generate_ticket_box_guest(req)

    generate_ticket_box = {'guest': generate_ticket_box_guest,
                           'user': generate_ticket_box_user,
                           'admin': generate_ticket_box_admin}

    return generate_ticket_box[ulevel](req)
def _generate_person_info_box(self, ulevel, ln):
    '''
    Generates the name info box for the specified user permission level.

    @param ulevel: user permission level
    @type ulevel: str
    @param ln: page display language
    @type ln: str

    @return: name info box
    @rtype: str
    '''
    # All three levels render the same box; keep the KeyError behavior of
    # the dispatch dict this replaces for unknown levels.
    if ulevel not in ('guest', 'user', 'admin'):
        raise KeyError(ulevel)
    names = webapi.get_person_names_from_id(self.person_id)
    return TEMPLATE.tmpl_admin_person_info_box(ln, person_id=self.person_id,
                                               names=names)
def _generate_tabs(self, ulevel, req):
    '''
    Generates the tabs content for the specified user permission level.

    @param ulevel: user permission level ('guest', 'user' or 'admin')
    @type ulevel: str
    @param req: apache request object
    @type req: apache request object

    @return: tabs content
    @rtype: str
    '''
    from invenio.legacy.bibauthorid.templates import verbiage_dict as tmpl_verbiage_dict
    from invenio.legacy.bibauthorid.templates import buttons_verbiage_dict as tmpl_buttons_verbiage_dict

    def generate_tabs_guest(req):
        # Guests get no ticket links and only the read-only tabs.
        links = list()  # ['delete', 'commit','del_entry','commit_entry']
        tabs = ['records', 'repealed', 'review']

        return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
                                   open_tickets=list(),
                                   verbiage_dict=tmpl_verbiage_dict['guest'],
                                   buttons_verbiage_dict=tmpl_buttons_verbiage_dict['guest'],
                                   show_reset_button=False)

    def generate_tabs_user(req):
        links = ['delete', 'del_entry']
        tabs = ['records', 'repealed', 'review', 'tickets']

        # Owner verbiage is used when the user views his own profile.
        pinfo = session['personinfo']
        uid = getUid(req)
        user_is_owner = 'not_owner'
        if pinfo["claimpaper_admin_last_viewed_pid"] == webapi.get_pid_from_uid(uid):
            user_is_owner = 'owner'

        # Only show the request tickets opened by this very user
        # (matched by the uid encoded in the 'uid-ip' ticket row).
        open_tickets = webapi.get_person_request_ticket(self.person_id)
        tickets = list()
        for t in open_tickets:
            owns = False
            for row in t[0]:
                if row[0] == 'uid-ip' and row[1].split('||')[0] == str(uid):
                    owns = True
            if owns:
                tickets.append(t)

        return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
                                   open_tickets=tickets,
                                   verbiage_dict=tmpl_verbiage_dict['user'][user_is_owner],
                                   buttons_verbiage_dict=tmpl_buttons_verbiage_dict['user'][user_is_owner])

    def generate_tabs_admin(req, show_tabs=None, ticket_links=None,
                            open_tickets=None, verbiage_dict=None,
                            buttons_verbiage_dict=None, show_reset_button=True):
        # BUGFIX: the defaults for show_tabs/ticket_links were mutable list
        # literals; show_tabs is mutated below (show_tabs.remove(...)), so a
        # shared default list would be corrupted by the mutation. Use the
        # None-sentinel idiom instead.
        if show_tabs is None:
            show_tabs = ['records', 'repealed', 'review', 'comments', 'tickets', 'data']
        if ticket_links is None:
            ticket_links = ['delete', 'commit', 'del_entry', 'commit_entry']

        personinfo = dict()
        try:
            personinfo = session["personinfo"]
        except KeyError:
            return ""

        if 'ln' in personinfo:
            ln = personinfo["ln"]
        else:
            ln = CFG_SITE_LANG

        all_papers = webapi.get_papers_by_person_id(self.person_id, ext_out=True)
        records = [{'recid': paper[0],
                    'bibref': paper[1],
                    'flag': paper[2],
                    'authorname': paper[3],
                    'authoraffiliation': paper[4],
                    'paperdate': paper[5],
                    'rt_status': paper[6],
                    'paperexperiment': paper[7]} for paper in all_papers]
        # flag < -1 marks a repealed (rejected) paper.
        rejected_papers = [row for row in records if row['flag'] < -1]
        rest_of_papers = [row for row in records if row['flag'] >= -1]
        review_needed = webapi.get_review_needing_records(self.person_id)

        # Hide tabs that would be empty.
        if len(review_needed) < 1:
            if 'review' in show_tabs:
                show_tabs.remove('review')

        if open_tickets is None:
            open_tickets = webapi.get_person_request_ticket(self.person_id)
        else:
            if len(open_tickets) < 1 and 'tickets' in show_tabs:
                show_tabs.remove('tickets')

        rt_tickets = None
        if "admin_requested_ticket_id" in personinfo:
            rt_tickets = personinfo["admin_requested_ticket_id"]

        if verbiage_dict is None:
            verbiage_dict = translate_dict_values(tmpl_verbiage_dict['admin'], ln)
        if buttons_verbiage_dict is None:
            buttons_verbiage_dict = translate_dict_values(tmpl_buttons_verbiage_dict['admin'], ln)

        # send data to the template function
        tabs = TEMPLATE.tmpl_admin_tabs(ln, person_id=self.person_id,
                                        rejected_papers=rejected_papers,
                                        rest_of_papers=rest_of_papers,
                                        review_needed=review_needed,
                                        rt_tickets=rt_tickets,
                                        open_rt_tickets=open_tickets,
                                        show_tabs=show_tabs,
                                        ticket_links=ticket_links,
                                        verbiage_dict=verbiage_dict,
                                        buttons_verbiage_dict=buttons_verbiage_dict,
                                        show_reset_button=show_reset_button)

        return tabs

    def translate_dict_values(dictionary, ln):
        # Recursively apply the language's gettext to every string value.
        def translate_str_values(dictionary, f=lambda x: x):
            translated_dict = dict()
            for key, value in dictionary.iteritems():
                if isinstance(value, str):
                    translated_dict[key] = f(value)
                elif isinstance(value, dict):
                    translated_dict[key] = translate_str_values(value, f)
                else:
                    raise TypeError("Value should be either string or dictionary.")
            return translated_dict

        return translate_str_values(dictionary, f=gettext_set_language(ln))

    generate_tabs = {'guest': generate_tabs_guest,
                     'user': generate_tabs_user,
                     'admin': generate_tabs_admin}

    return generate_tabs[ulevel](req)
def _generate_footer(self, ulevel):
    '''
    Generates the footer for the specified user permission level.

    @param ulevel: user permission level
    @type ulevel: str

    @return: footer
    @rtype: str
    '''
    def footer_for_any_level():
        # All permission levels currently share the same footer.
        return TEMPLATE.tmpl_invenio_search_box()

    dispatch = {'guest': footer_for_any_level,
                'user': footer_for_any_level,
                'admin': footer_for_any_level}

    return dispatch[ulevel]()
def _ticket_dispatch_end(self, req):
    '''
    The ticket dispatch is finished: redirect to the page of origin, or to
    the last visited manage_profile/claim page, or fall through so the
    autoclaim box of the caller can be repopulated.

    @param req: apache request object
    @type req: apache request object
    '''
    pinfo = session["personinfo"]
    webapi.session_bareinit(req)

    if 'claim_in_process' in pinfo:
        pinfo['claim_in_process'] = False

    if "merge_ticket" in pinfo and pinfo['merge_ticket']:
        pinfo['merge_ticket'] = []

    user_info = collect_user_info(req)
    user_info['precached_viewclaimlink'] = True
    session.dirty = True

    # A stored referer takes precedence: go back where the user came from.
    if "referer" in pinfo and pinfo["referer"]:
        referer = pinfo["referer"]
        del(pinfo["referer"])
        session.dirty = True
        return redirect_to_url(req, referer)

    # if we are coming from the autoclaim box we should not redirect and
    # just return to the caller function
    if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] == False and pinfo['autoclaim']['begin_autoclaim'] == True:
        pinfo['autoclaim']['review_failed'] = False
        pinfo['autoclaim']['begin_autoclaim'] = False
        session.dirty = True
    else:
        redirect_page = webapi.history_get_last_visited_url(pinfo['visit_diary'], limit_to_page=['manage_profile', 'claim'])
        if not redirect_page:
            redirect_page = webapi.get_fallback_redirect_link(req)
        if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] == True and pinfo['autoclaim']['checkout'] == True:
            redirect_page = '%s/author/claim/action?checkout=True' % (CFG_SITE_URL,)
            pinfo['autoclaim']['checkout'] = False
            session.dirty = True
        elif not 'manage_profile' in redirect_page:
            pinfo['autoclaim']['review_failed'] = False
            # BUGFIX: was 'begin_autoclaim == False' -- a no-op comparison
            # instead of an assignment, so the flag was never reset.
            pinfo['autoclaim']['begin_autoclaim'] = False
            pinfo['autoclaim']['checkout'] = True
            session.dirty = True
            redirect_page = '%s/author/claim/%s?open_claim=True' % (CFG_SITE_URL, webapi.get_person_redirect_link(pinfo["claimpaper_admin_last_viewed_pid"]))
        else:
            pinfo['autoclaim']['review_failed'] = False
            # BUGFIX: same '==' vs '=' typo as above.
            pinfo['autoclaim']['begin_autoclaim'] = False
            pinfo['autoclaim']['checkout'] = True
            session.dirty = True
        return redirect_to_url(req, redirect_page)
def __user_is_authorized(self, req, action):
    '''
    Determines if a given user is authorized to perform a specified action

    @param req: Apache Request Object
    @type req: Apache Request Object
    @param action: the action the user wants to perform
    @type action: string

    @return: True if user is allowed to perform the action, False if not
    @rtype: boolean
    '''
    if not req:
        return False
    if not action:
        return False

    wanted_action = escape(action)
    uid = getUid(req)

    if not isinstance(uid, int):
        return False
    if uid == 0:
        # uid 0 (presumably the guest account) is never authorized.
        return False

    # Authorized as soon as any of the user's role actions matches.
    for role_action in acc_find_user_role_actions({'uid': uid}):
        if role_action[1] == wanted_action:
            return True

    return False
@staticmethod
def _scripts(kill_browser_cache=False):
    '''
    Returns html code to be included in the meta header of the html page.
    The actual code is stored in the template.

    @param kill_browser_cache: whether the template should emit
        cache-busting includes (passed through verbatim)
    @type kill_browser_cache: bool

    @return: html formatted Javascript and CSS inclusions for the <head>
    @rtype: string
    '''
    # Thin delegation: the template layer owns the actual markup.
    return TEMPLATE.tmpl_meta_includes(kill_browser_cache)
def _check_user_fields(self, req, form):
    '''
    Washes the user-supplied contact fields (first/last name, e-mail,
    comments) and stores the escaped values in the session's personinfo.
    Fields failing validation are appended to
    pinfo["checkout_faulty_fields"].

    @param req: apache request object
    @type req: apache request object
    @param form: GET/POST request parameters
    @type form: dict
    '''
    argd = wash_urlargd(
        form,
        {'ln': (str, CFG_SITE_LANG),
         'user_first_name': (str, None),
         'user_last_name': (str, None),
         'user_email': (str, None),
         'user_comments': (str, None)})
    pinfo = session["personinfo"]
    ulevel = pinfo["ulevel"]
    skip_checkout_faulty_fields = False

    # Logged-in users and admins skip faulty-field bookkeeping.
    if ulevel in ['user', 'admin']:
        skip_checkout_faulty_fields = True

    # '*_sys' entries mark values already provided by the system; only
    # fall back to the form values when no system value is present.
    if not ("user_first_name_sys" in pinfo and pinfo["user_first_name_sys"]):
        if "user_first_name" in argd and argd['user_first_name']:
            # NOTE(review): this inner condition can never be true -- the
            # enclosing 'if' already guarantees argd['user_first_name'] is
            # truthy, so "user_first_name" is never flagged faulty here.
            if not argd["user_first_name"] and not skip_checkout_faulty_fields:
                pinfo["checkout_faulty_fields"].append("user_first_name")
            else:
                pinfo["user_first_name"] = escape(argd["user_first_name"])

    if not ("user_last_name_sys" in pinfo and pinfo["user_last_name_sys"]):
        if "user_last_name" in argd and argd['user_last_name']:
            # NOTE(review): same unreachable inner branch as above.
            if not argd["user_last_name"] and not skip_checkout_faulty_fields:
                pinfo["checkout_faulty_fields"].append("user_last_name")
            else:
                pinfo["user_last_name"] = escape(argd["user_last_name"])

    if not ("user_email_sys" in pinfo and pinfo["user_email_sys"]):
        if "user_email" in argd and argd['user_email']:
            if not email_valid_p(argd["user_email"]):
                pinfo["checkout_faulty_fields"].append("user_email")
            else:
                pinfo["user_email"] = escape(argd["user_email"])

            # Guests may not use an e-mail already registered to an account.
            if (ulevel == "guest"
                    and emailUnique(argd["user_email"]) > 0):
                pinfo["checkout_faulty_fields"].append("user_email_taken")
        else:
            # Missing e-mail is always faulty (unlike the name fields).
            pinfo["checkout_faulty_fields"].append("user_email")

    if "user_comments" in argd:
        if argd["user_comments"]:
            pinfo["user_ticket_comments"] = escape(argd["user_comments"])
        else:
            pinfo["user_ticket_comments"] = ""

    session.dirty = True
def action(self, req, form):
    '''
    Initial step in processing of requests: ticket generation/update.
    Also acts as action dispatcher for interface mass action requests.

    Valid mass actions are:
    - add_external_id: add an external identifier to an author
    - add_missing_external_ids: add missing external identifiers of an author
    - bibref_check_submit:
    - cancel: clean the session (erase tickets and so on)
    - cancel_rt_ticket:
    - cancel_search_ticket:
    - cancel_stage:
    - checkout:
    - checkout_continue_claiming:
    - checkout_remove_transaction:
    - checkout_submit:
    - claim: claim papers for an author
    - commit_rt_ticket:
    - confirm: confirm assignments to an author
    - delete_external_ids: delete external identifiers of an author
    - repeal: repeal assignments from an author
    - reset: reset assignments of an author
    - set_canonical_name: set/swap the canonical name of an author
    - to_other_person: assign a document from an author to another author

    @param req: apache request object
    @type req: apache request object
    @param form: parameters sent via GET or POST request
    @type form: dict

    @return: a full page formatted in HTML
    @return: str
    '''
    webapi.session_bareinit(req)
    pinfo = session["personinfo"]
    argd = wash_urlargd(form,
                        {'autoclaim_show_review': (str, None),
                         'canonical_name': (str, None),
                         'existing_ext_ids': (list, None),
                         'ext_id': (str, None),
                         'uid': (int, None),
                         'ext_system': (str, None),
                         'ln': (str, CFG_SITE_LANG),
                         'pid': (int, -1),
                         'primary_profile': (str, None),
                         'search_param': (str, None),
                         'rt_action': (str, None),
                         'rt_id': (int, None),
                         'selection': (list, None),

                         # permitted actions
                         'add_external_id': (str, None),
                         'set_uid': (str, None),
                         'add_missing_external_ids': (str, None),
                         'associate_profile': (str, None),
                         'bibref_check_submit': (str, None),
                         'cancel': (str, None),
                         'cancel_merging': (str, None),
                         'cancel_rt_ticket': (str, None),
                         'cancel_search_ticket': (str, None),
                         'cancel_stage': (str, None),
                         'checkout': (str, None),
                         'checkout_continue_claiming': (str, None),
                         'checkout_remove_transaction': (str, None),
                         'checkout_submit': (str, None),
                         'assign': (str, None),
                         'commit_rt_ticket': (str, None),
                         'confirm': (str, None),
                         'delete_external_ids': (str, None),
                         'merge': (str, None),
                         'reject': (str, None),
                         'repeal': (str, None),
                         'reset': (str, None),
                         'send_message': (str, None),
                         'set_canonical_name': (str, None),
                         'to_other_person': (str, None)})

    ulevel = pinfo["ulevel"]
    ticket = pinfo["ticket"]
    uid = getUid(req)
    ln = argd['ln']
    action = None

    permitted_actions = ['add_external_id',
                         'set_uid',
                         'add_missing_external_ids',
                         'associate_profile',
                         'bibref_check_submit',
                         'cancel',
                         'cancel_merging',
                         'cancel_rt_ticket',
                         'cancel_search_ticket',
                         'cancel_stage',
                         'checkout',
                         'checkout_continue_claiming',
                         'checkout_remove_transaction',
                         'checkout_submit',
                         'assign',
                         'commit_rt_ticket',
                         'confirm',
                         'delete_external_ids',
                         'merge',
                         'reject',
                         'repeal',
                         'reset',
                         'send_message',
                         'set_canonical_name',
                         'to_other_person']

    for act in permitted_actions:
        # one action (the most) is enabled in the form
        if argd[act] is not None:
            action = act

    no_access = self._page_access_permission_wall(req, None)
    if no_access and action not in ["assign"]:
        return no_access

    # incomplete papers (incomplete paper info or other problems) trigger
    # action function without user's interference in order to fix those
    # problems and claim papers or remove them from the ticket
    if (action is None
            and "bibref_check_required" in pinfo
            and pinfo["bibref_check_required"]):
        if "bibref_check_reviewed_bibrefs" in pinfo:
            del(pinfo["bibref_check_reviewed_bibrefs"])
            session.dirty = True

    def add_external_id():
        '''
        associates the user with pid to the external id ext_id
        '''
        if argd['pid'] > -1:
            pid = argd['pid']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot add external id to unknown person")

        if argd['ext_system'] is not None:
            ext_sys = argd['ext_system']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot add an external id without specifying the system")

        if argd['ext_id'] is not None:
            ext_id = argd['ext_id']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot add a custom external id without a suggestion")

        userinfo = "%s||%s" % (uid, req.remote_ip)
        webapi.add_person_external_id(pid, ext_sys, ext_id, userinfo)

        return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))

    def set_uid():
        '''
        associates the user with pid to the external id ext_id
        '''
        if argd['pid'] > -1:
            pid = argd['pid']
        else:
            return self._error_page(req, ln,
                                    "Fatal: current user is unknown")

        if argd['uid'] is not None:
            dest_uid = int(argd['uid'])
        else:
            return self._error_page(req, ln,
                                    "Fatal: user id is not valid")

        userinfo = "%s||%s" % (uid, req.remote_ip)
        webapi.set_person_uid(pid, dest_uid, userinfo)

        # remove arxiv pubs of current pid
        remove_arxiv_papers_of_author(pid)

        dest_uid_pid = webapi.get_pid_from_uid(dest_uid)
        if dest_uid_pid > -1:
            # move the arxiv pubs of the dest_uid to the current pid
            dest_uid_arxiv_papers = webapi.get_arxiv_papers_of_author(dest_uid_pid)
            webapi.add_arxiv_papers_to_author(dest_uid_arxiv_papers, pid)

        return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))

    def add_missing_external_ids():
        if argd['pid'] > -1:
            pid = argd['pid']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot recompute external ids for an unknown person")

        update_external_ids_of_authors([pid], overwrite=False)

        return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))

    def associate_profile():
        '''
        associates the user with user id to the person profile with pid
        '''
        if argd['pid'] > -1:
            pid = argd['pid']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot associate profile without a person id.")

        uid = getUid(req)
        pid, profile_claimed = webapi.claim_profile(uid, pid)

        redirect_pid = pid
        if profile_claimed:
            pinfo['pid'] = pid
            pinfo['should_check_to_autoclaim'] = True
            pinfo["login_info_message"] = "confirm_success"
            session.dirty = True
            redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, redirect_pid))
        # if someone have already claimed this profile it redirects to
        # choose_profile with an error message
        else:
            param = ''
            if 'search_param' in argd and argd['search_param']:
                param = '&search_param=' + argd['search_param']
            redirect_to_url(req, '%s/author/choose_profile?failed=%s%s' % (CFG_SITE_URL, True, param))

    def bibref_check_submit():
        pinfo["bibref_check_reviewed_bibrefs"] = list()
        add_rev = pinfo["bibref_check_reviewed_bibrefs"].append

        if ("bibrefs_auto_assigned" in pinfo
                or "bibrefs_to_confirm" in pinfo):
            person_reviews = list()

            if ("bibrefs_auto_assigned" in pinfo
                    and pinfo["bibrefs_auto_assigned"]):
                person_reviews.append(pinfo["bibrefs_auto_assigned"])

            if ("bibrefs_to_confirm" in pinfo
                    and pinfo["bibrefs_to_confirm"]):
                person_reviews.append(pinfo["bibrefs_to_confirm"])

            for ref_review in person_reviews:
                for person_id in ref_review:
                    for bibrec in ref_review[person_id]["bibrecs"]:
                        rec_grp = "bibrecgroup%s" % bibrec
                        elements = list()

                        if rec_grp in form:
                            if isinstance(form[rec_grp], str):
                                elements.append(form[rec_grp])
                            elif isinstance(form[rec_grp], list):
                                elements += form[rec_grp]
                            else:
                                continue

                            for element in elements:
                                test = element.split("||")

                                if test and len(test) > 1 and test[1]:
                                    tref = test[1] + "," + str(bibrec)
                                    tpid = webapi.wash_integer_id(test[0])

                                    if (webapi.is_valid_bibref(tref)
                                            and tpid > -1):
                                        add_rev(element + "," + str(bibrec))
        session.dirty = True

    def cancel():
        self.__session_cleanup(req)

        return self._ticket_dispatch_end(req)

    def cancel_merging():
        '''
        empties the session out of merge content and redirects to the manage profile page
        that the user was viewing before the merge
        '''
        if argd['primary_profile']:
            primary_cname = argd['primary_profile']
        else:
            return self._error_page(req, ln,
                                    "Fatal: Couldn't redirect to the previous page")

        webapi.session_bareinit(req)
        pinfo = session['personinfo']

        if pinfo['merge_profiles']:
            pinfo['merge_profiles'] = list()
            session.dirty = True

        redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)

        return redirect_to_url(req, redirect_url)

    def cancel_rt_ticket():
        if argd['selection'] is not None:
            bibrefrecs = argd['selection']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot cancel unknown ticket")

        if argd['pid'] > -1:
            pid = argd['pid']
        else:
            return self._error_page(req, ln, "Fatal: cannot cancel unknown ticket")

        if argd['rt_id'] is not None and argd['rt_action'] is not None:
            rt_id = int(argd['rt_id'])
            rt_action = argd['rt_action']

            for bibrefrec in bibrefrecs:
                webapi.delete_transaction_from_request_ticket(pid, rt_id, rt_action, bibrefrec)
        else:
            # With no rt_id the first selection element IS the ticket id.
            rt_id = int(bibrefrecs[0])
            webapi.delete_request_ticket(pid, rt_id)

        return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))

    def cancel_search_ticket(without_return=False):
        if 'search_ticket' in pinfo:
            del(pinfo['search_ticket'])
        session.dirty = True

        if "claimpaper_admin_last_viewed_pid" in pinfo:
            pid = pinfo["claimpaper_admin_last_viewed_pid"]
            if not without_return:
                return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))

        if not without_return:
            return self.search(req, form)

    def cancel_stage():
        if 'bibref_check_required' in pinfo:
            del(pinfo['bibref_check_required'])

        if 'bibrefs_auto_assigned' in pinfo:
            del(pinfo['bibrefs_auto_assigned'])

        if 'bibrefs_to_confirm' in pinfo:
            del(pinfo['bibrefs_to_confirm'])

        for tt in [row for row in ticket if 'incomplete' in row]:
            ticket.remove(tt)

        session.dirty = True

        return self._ticket_dispatch_end(req)

    def checkout():
        pass
        # return self._ticket_final_review(req)

    def checkout_continue_claiming():
        pinfo["checkout_faulty_fields"] = list()
        self._check_user_fields(req, form)

        return self._ticket_dispatch_end(req)

    def checkout_remove_transaction():
        bibref = argd['checkout_remove_transaction']

        if webapi.is_valid_bibref(bibref):
            for rmt in [row for row in ticket if row["bibref"] == bibref]:
                ticket.remove(rmt)

        pinfo["checkout_confirmed"] = False
        session.dirty = True
        # return self._ticket_final_review(req)

    def checkout_submit():
        pinfo["checkout_faulty_fields"] = list()
        self._check_user_fields(req, form)

        if not ticket:
            pinfo["checkout_faulty_fields"].append("tickets")

        pinfo["checkout_confirmed"] = True
        if pinfo["checkout_faulty_fields"]:
            pinfo["checkout_confirmed"] = False

        session.dirty = True
        # return self._ticket_final_review(req)

    def claim():
        if argd['selection'] is not None:
            bibrefrecs = argd['selection']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot create ticket without any bibrefrec")
        if argd['pid'] > -1:
            pid = argd['pid']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot claim papers to an unknown person")

        if action == 'assign':
            # Drop records that are already claimed to this author.
            claimed_recs = [paper[2] for paper in get_claimed_papers_of_author(pid)]
            for bibrefrec in list(bibrefrecs):
                _, rec = webapi.split_bibrefrec(bibrefrec)
                if rec in claimed_recs:
                    bibrefrecs.remove(bibrefrec)

        for bibrefrec in bibrefrecs:
            operation_parts = {'pid': pid,
                               'action': action,
                               'bibrefrec': bibrefrec}
            operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
            if operation_to_be_added is None:
                continue

            ticket = pinfo['ticket']
            webapi.add_operation_to_ticket(operation_to_be_added, ticket)
        session.dirty = True

        return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))

    def claim_to_other_person():
        if argd['selection'] is not None:
            bibrefrecs = argd['selection']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot create ticket without any bibrefrec")

        return self._ticket_open_assign_to_other_person(req, bibrefrecs, form)

    def commit_rt_ticket():
        if argd['selection'] is not None:
            tid = argd['selection'][0]
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot cancel unknown ticket")

        if argd['pid'] > -1:
            pid = argd['pid']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot cancel unknown ticket")

        return self._commit_rt_ticket(req, tid, pid)

    def confirm_repeal_reset():
        if argd['pid'] > -1 or int(argd['pid']) == CREATE_NEW_PERSON:
            pid = argd['pid']
            cancel_search_ticket(without_return=True)
        else:
            return self._ticket_open_assign_to_other_person(req, argd['selection'], form)
            # return self._error_page(req, ln, "Fatal: cannot create ticket without a person id! (crr %s)" %repr(argd))

        bibrefrecs = argd['selection']

        if argd['confirm']:
            action = 'assign'
        elif argd['repeal']:
            action = 'reject'
        elif argd['reset']:
            action = 'reset'
        else:
            return self._error_page(req, ln, "Fatal: not existent action!")

        for bibrefrec in bibrefrecs:
            form['jsondata'] = json.dumps({'pid': str(pid),
                                           'action': action,
                                           'bibrefrec': bibrefrec,
                                           'on': 'user'})
            t = WebInterfaceAuthorTicketHandling()
            t.add_operation(req, form)

        return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))

    def delete_external_ids():
        '''
        deletes association between the user with pid and the external id ext_id
        '''
        if argd['pid'] > -1:
            pid = argd['pid']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot delete external ids from an unknown person")

        if argd['existing_ext_ids'] is not None:
            existing_ext_ids = argd['existing_ext_ids']
        else:
            return self._error_page(req, ln,
                                    "Fatal: you must select at least one external id in order to delete it")

        userinfo = "%s||%s" % (uid, req.remote_ip)
        webapi.delete_person_external_ids(pid, existing_ext_ids, userinfo)

        return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))

    def none_action():
        return self._error_page(req, ln,
                                "Fatal: cannot create ticket if no action selected.")

    def merge():
        '''
        performs a merge if allowed on the profiles that the user chose
        '''
        if argd['primary_profile']:
            primary_cname = argd['primary_profile']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot perform a merge without a primary profile!")

        if argd['selection']:
            profiles_to_merge = argd['selection']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot perform a merge without any profiles selected!")

        webapi.session_bareinit(req)
        pinfo = session['personinfo']
        uid = getUid(req)

        primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
        pids_to_merge = [webapi.get_person_id_from_canonical_id(cname) for cname in profiles_to_merge]

        is_admin = False
        if pinfo['ulevel'] == 'admin':
            is_admin = True

        # checking if there are restrictions regarding this merge
        can_perform_merge, preventing_pid = webapi.merge_is_allowed(primary_pid, pids_to_merge, is_admin)

        if not can_perform_merge:
            # when redirected back to the merge profiles page display an
            # error message about the currently attempted merge
            pinfo['merge_info_message'] = ("failure", "confirm_failure")
            session.dirty = True
            redirect_url = "%s/author/merge_profiles?primary_profile=%s" % (CFG_SITE_URL, primary_cname)
            return redirect_to_url(req, redirect_url)

        if is_admin:
            webapi.merge_profiles(primary_pid, pids_to_merge)
            # when redirected back to the manage profile page display a
            # message about the currently attempted merge
            pinfo['merge_info_message'] = ("success", "confirm_success")
        else:
            name = ''
            if 'user_last_name' in pinfo:
                name = pinfo['user_last_name']
            if 'user_first_name' in pinfo:
                name += pinfo['user_first_name']
            email = ''
            if 'user_email' in pinfo:
                email = pinfo['user_email']

            selection_str = "&selection=".join(profiles_to_merge)

            userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
                        'name': name,
                        'email': email,
                        'merge link': "%s/author/merge_profiles?primary_profile=%s&selection=%s" % (CFG_SITE_URL, primary_cname, selection_str)}
            # a message is sent to the admin with info regarding the
            # currently attempted merge
            webapi.create_request_message(userinfo, subj='Merge profiles request')
            # when redirected back to the manage profile page display a
            # message about the merge
            pinfo['merge_info_message'] = ("success", "confirm_operation")

        pinfo['merge_profiles'] = list()
        session.dirty = True

        redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)
        return redirect_to_url(req, redirect_url)

    def send_message():
        '''
        sends a message from the user to the admin
        '''
        webapi.session_bareinit(req)
        pinfo = session['personinfo']
        # pp = pprint.PrettyPrinter(indent=4)
        # session_dump = pp.pprint(pinfo)
        session_dump = str(pinfo)
        name = ''
        name_changed = False
        name_given = ''
        email = ''
        email_changed = False
        email_given = ''
        comment = ''
        last_page_visited = ''

        if "user_last_name" in pinfo:
            name = pinfo["user_last_name"]
        if "user_first_name" in pinfo:
            name += pinfo["user_first_name"]
        name = name.rstrip()

        if "user_email" in pinfo:
            email = pinfo["user_email"]
            email = email.rstrip()

        if 'Name' in form:
            if not name:
                name = form['Name']
            elif name != form['Name']:
                name_given = form['Name']
                name_changed = True
            name = name.rstrip()

        if 'E-mail' in form:
            if not email:
                email = form['E-mail']
            # BUGFIX: was 'name != form['E-mail']' (copy-paste from the Name
            # branch above), so a changed e-mail address was never detected.
            elif email != form['E-mail']:
                email_given = form['E-mail']
                email_changed = True
            email = email.rstrip()

        if 'Comment' in form:
            comment = form['Comment']
            comment = comment.rstrip()

        if not name or not comment or not email:
            redirect_to_url(req, '%s/author/help?incomplete_params=%s' % (CFG_SITE_URL, True))

        if 'last_page_visited' in form:
            last_page_visited = form['last_page_visited']

        uid = getUid(req)
        userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
                    'name': name,
                    'email': email,
                    'comment': comment,
                    'last_page_visited': last_page_visited,
                    'session_dump': session_dump,
                    'name_given': name_given,
                    'email_given': email_given,
                    'name_changed': name_changed,
                    'email_changed': email_changed}
        webapi.create_request_message(userinfo)

    def set_canonical_name():
        if argd['pid'] > -1:
            pid = argd['pid']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot set canonical name to unknown person")

        if argd['canonical_name'] is not None:
            cname = argd['canonical_name']
        else:
            return self._error_page(req, ln,
                                    "Fatal: cannot set a custom canonical name without a suggestion")

        userinfo = "%s||%s" % (uid, req.remote_ip)
        if webapi.is_valid_canonical_id(cname):
            webapi.swap_person_canonical_name(pid, cname, userinfo)
        else:
            webapi.update_person_canonical_name(pid, cname, userinfo)

        return redirect_to_url(req, "%s/author/claim/%s%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid), '#tabData'))

    action_functions = {'add_external_id': add_external_id,
                        'set_uid': set_uid,
                        'add_missing_external_ids': add_missing_external_ids,
                        'associate_profile': associate_profile,
                        'bibref_check_submit': bibref_check_submit,
                        'cancel': cancel,
                        'cancel_merging': cancel_merging,
                        'cancel_rt_ticket': cancel_rt_ticket,
                        'cancel_search_ticket': cancel_search_ticket,
                        'cancel_stage': cancel_stage,
                        'checkout': checkout,
                        'checkout_continue_claiming': checkout_continue_claiming,
                        'checkout_remove_transaction': checkout_remove_transaction,
                        'checkout_submit': checkout_submit,
                        'assign': claim,
                        'commit_rt_ticket': commit_rt_ticket,
                        'confirm': confirm_repeal_reset,
                        'delete_external_ids': delete_external_ids,
                        'merge': merge,
                        'reject': claim,
                        'repeal': confirm_repeal_reset,
                        'reset': confirm_repeal_reset,
                        'send_message': send_message,
                        'set_canonical_name': set_canonical_name,
                        'to_other_person': claim_to_other_person,
                        None: none_action}

    return action_functions[action]()
def _ticket_open_claim(self, req, bibrefs, ln):
    '''
    Generate page to let user choose how to proceed

    @param req: Apache Request Object
    @type req: Apache Request Object
    @param bibrefs: list of record IDs to perform an action on
    @type bibrefs: list of int
    @param ln: language to display the page in
    @type ln: string
    '''
    uid = getUid(req)
    uinfo = collect_user_info(req)
    pinfo = session["personinfo"]

    # Prefer the language stored in the session over the argument.
    ln = pinfo["ln"] if 'ln' in pinfo else CFG_SITE_LANG
    _ = gettext_set_language(ln)

    no_access = self._page_access_permission_wall(req)
    session.dirty = True

    pid = -1
    search_enabled = True

    if not no_access and uinfo["precached_usepaperclaim"]:
        own_pid = webapi.get_pid_from_uid(uid)
        if own_pid > -1:
            pid = own_pid

    last_viewed_pid = False
    if (not no_access
            and "claimpaper_admin_last_viewed_pid" in pinfo
            and pinfo["claimpaper_admin_last_viewed_pid"]):
        last_pid = pinfo["claimpaper_admin_last_viewed_pid"]
        # Pick the most frequent name variant for display.
        names = sorted(webapi.get_person_names_from_id(last_pid),
                       key=lambda k: k[1], reverse=True)
        if names and names[0]:
            last_viewed_pid = [last_pid, names[0][0]]

    if no_access:
        search_enabled = False

    pinfo["referer"] = uinfo["referer"]
    session.dirty = True

    body = TEMPLATE.tmpl_open_claim(bibrefs, pid, last_viewed_pid,
                                    search_enabled=search_enabled)
    body = TEMPLATE.tmpl_person_detail_layout(body)
    title = _('Claim this paper')
    metaheaderadd = WebInterfaceBibAuthorIDClaimPages._scripts(kill_browser_cache=True)

    return page(title=title,
                metaheaderadd=metaheaderadd,
                body=body,
                req=req,
                language=ln)
def _ticket_open_assign_to_other_person(self, req, bibrefs, form):
    '''
    Initializes search to find a person to attach the selected records to

    @param req: Apache request object
    @type req: Apache request object
    @param bibrefs: list of record IDs to consider
    @type bibrefs: list of int
    @param form: GET/POST request parameters
    @type form: dict
    '''
    pinfo = session["personinfo"]
    # Remember what has to be assigned once a target person is chosen.
    pinfo["search_ticket"] = {'action': 'assign',
                              'bibrefs': bibrefs}
    session.dirty = True
    return self.search(req, form)
def _cancel_rt_ticket(self, req, tid, pid):
    '''
    deletes an RT ticket and sends the user back to the claim page
    '''
    webapi.delete_request_ticket(pid, tid)
    claim_url = "%s/author/claim/%s" % (
        CFG_SITE_URL, webapi.get_person_redirect_link(str(pid)))
    return redirect_to_url(req, claim_url)
def _cancel_transaction_from_rt_ticket(self, tid, pid, action, bibref):
    '''
    deletes a transaction from an rt ticket

    Thin wrapper; all work is delegated to the webapi layer.
    '''
    webapi.delete_transaction_from_request_ticket(pid, tid, action, bibref)
def _commit_rt_ticket(self, req, tid, pid):
    '''
    Commit of an rt ticket: creates a real ticket and commits.
    '''
    pinfo = session["personinfo"]
    claim_ticket = pinfo["ticket"]
    uid = getUid(req)
    tid = int(tid)

    rt_ticket = get_validated_request_tickets_for_author(pid, tid)[0]

    # Turn every RT operation into an operation on the user's claim ticket.
    for rt_action, bibrefrec in rt_ticket['operations']:
        new_operation = webapi.construct_operation({'pid': pid,
                                                    'action': rt_action,
                                                    'bibrefrec': bibrefrec},
                                                   pinfo, uid)
        webapi.add_operation_to_ticket(new_operation, claim_ticket)
    session.dirty = True

    # The RT ticket is consumed once its operations have been transferred.
    webapi.delete_request_ticket(pid, tid)

    redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
    '''
    Create a page that contains a message explaining the error.

    @param req: Apache Request Object
    @type req: Apache Request Object
    @param ln: language
    @type ln: string
    @param message: message to be displayed
    @type message: string
    @param intro: whether to prepend the generic apology paragraph
    @type intro: boolean
    '''
    _ = gettext_set_language(ln)

    if not message:
        message = "No further explanation available. Sorry."

    parts = []
    if intro:
        parts.append(_("<p>We're sorry. An error occurred while "
                       "handling your request. Please find more information "
                       "below:</p>"))
    parts.append("<p><strong>%s</strong></p>" % message)

    return page(title=_("Notice"),
                body="\n".join(parts),
                description="%s - Internal Error" % CFG_SITE_NAME,
                keywords="%s, Internal Error" % CFG_SITE_NAME,
                language=ln,
                req=req)
def __session_cleanup(self, req):
    '''
    Cleans the session from all bibauthorid specific settings and
    with that cancels any transaction currently in progress.

    @param req: Apache Request Object
    @type req: Apache Request Object
    '''
    try:
        pinfo = session["personinfo"]
    except KeyError:
        # No bibauthorid data in the session yet -- nothing to clean.
        return

    if "ticket" in pinfo:
        pinfo['ticket'] = []
    if "search_ticket" in pinfo:
        pinfo['search_ticket'] = dict()

    # clear up bibref checker if it's done.
    if ("bibref_check_required" in pinfo
            and not pinfo["bibref_check_required"]):
        for leftover_key in ('bibrefs_to_confirm', 'bibrefs_auto_assigned'):
            if leftover_key in pinfo:
                del(pinfo[leftover_key])
        del(pinfo["bibref_check_required"])

    for checkout_key in ("checkout_confirmed", "checkout_faulty_fields"):
        if checkout_key in pinfo:
            del(pinfo[checkout_key])

    # pinfo['ulevel'] = ulevel
    # pinfo["claimpaper_admin_last_viewed_pid"] = -1
    pinfo["admin_requested_ticket_id"] = -1
    session.dirty = True
def _generate_search_ticket_box(self, req):
    '''
    Generate the box reminding the user of a pending Person search
    within an attribution process.

    @param req: Apache request object
    @type req: Apache request object
    @return: html markup of the reminder box, '' when nothing is pending
    @rtype: string
    '''
    search_ticket = session["personinfo"].get('search_ticket')
    if not search_ticket:
        return ''
    return TEMPLATE.tmpl_search_ticket_box('person_search',
                                           'assign_papers',
                                           search_ticket['bibrefs'])
def search_box(self, query, shown_element_functions):
    '''
    Collects the data of the persons matched by the search and renders
    them through the author-search template.

    @param query: the query string
    @type query: string
    @param shown_element_functions: functions telling the template which
        columns to show and which buttons to print
    @type shown_element_functions: dict
    @return: html body
    @rtype: string
    '''
    # 'pass_status' flags whether each profile's availability should be
    # handed to the search template as well.
    include_status = bool(shown_element_functions.get('pass_status'))

    search_results = []
    for person_id in self._perform_search(query):
        # defaultdict(list) on purpose: the template may probe keys that
        # were never filled in and expects an empty list then.
        entry = defaultdict(list)
        entry['pid'] = person_id
        entry['canonical_id'] = webapi.get_canonical_id_from_person_id(person_id)
        entry['name_variants'] = webapi.get_person_names_from_id(person_id)
        entry['external_ids'] = webapi.get_external_ids_from_person_id(person_id)
        if include_status:
            entry['status'] = webapi.is_profile_available(person_id)
        search_results.append(entry)

    content = TEMPLATE.tmpl_author_search(query, search_results, shown_element_functions)
    return TEMPLATE.tmpl_person_detail_layout(content)
def search(self, req, form):
    '''
    Generates the person search page: a search bar plus, when a query
    was given, the matching person profiles.  While a claim ("search
    ticket") is in progress, assignment buttons are added to the results.

    @param req: Apache Request Object
    @type req: Apache Request Object
    @param form: GET/POST request params
    @type form: dict
    @return: a full page formatted in HTML
    @rtype: string
    '''
    webapi.session_bareinit(req)
    pinfo = session['personinfo']
    ulevel = pinfo['ulevel']
    person_id = self.person_id
    uid = getUid(req)

    argd = wash_urlargd(
        form,
        {'ln': (str, CFG_SITE_LANG),
         'verbose': (int, 0),
         'q': (str, None)})

    debug = "verbose" in argd and argd["verbose"] > 0
    ln = argd['ln']

    cname = ''
    is_owner = False
    last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
    if last_visited_pid is not None:
        cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
        # NOTE(review): ownership is only checked when the canonical id
        # is purely numeric (i.e. a bare person id) — confirm this
        # inversion is intended.
        try:
            int(cname)
        except ValueError:
            is_owner = False
        else:
            is_owner = self._is_profile_owner(last_visited_pid)

    menu = WebProfileMenu(str(cname), "search", ln, is_owner, self._is_admin(pinfo))

    title = "Person search"
    # Create Wrapper Page Markup
    profile_page = WebProfilePage("search", title, no_cache=True)
    profile_page.add_profile_menu(menu)

    profile_page.add_bootstrapped_data(json.dumps({
        "other": "var gBOX_STATUS = '10';var gPID = '10'; var gNumOfWorkers= '10'; var gReqTimeout= '10'; var gPageTimeout= '10';",
        "backbone": """
(function(ticketbox) {
    var app = ticketbox.app;
    app.userops.set(%s);
    app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
    }))

    if debug:
        profile_page.add_debug_info(pinfo)

    no_access = self._page_access_permission_wall(req)
    shown_element_functions = dict()
    shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_general_search_bar()
    if no_access:
        return no_access

    search_ticket = None
    bibrefs = []
    if 'search_ticket' in pinfo:
        search_ticket = pinfo['search_ticket']
        for r in search_ticket['bibrefs']:
            bibrefs.append(r)

    # Admins additionally get the "new person" generator for assignments.
    if search_ticket and "ulevel" in pinfo:
        if pinfo["ulevel"] == "admin":
            shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_assigning_search_new_person_generator(bibrefs)

    content = ""
    if search_ticket:
        shown_element_functions['button_gen'] = TEMPLATE.tmpl_assigning_search_button_generator(bibrefs)
        content = content + self._generate_search_ticket_box(req)

    query = None
    if 'q' in argd:
        if argd['q']:
            query = escape(argd['q'])

    content += self.search_box(query, shown_element_functions)
    body = profile_page.get_wrapped_body(content)

    parameter = None
    if query:
        # BUGFIX: the query used to be concatenated onto the literal
        # '%s' placeholder ('?search_param=%s' + query); substitute it.
        parameter = '?search_param=%s' % query
    webapi.history_log_visit(req, 'search', params=parameter)

    return page(title=title,
                metaheaderadd=profile_page.get_head().encode('utf-8'),
                body=body.encode('utf-8'),
                req=req,
                language=ln,
                show_title_p=False)
def merge_profiles(self, req, form):
    '''
    Beginning of the process that merges multiple person profiles:
    renders the merge page with the current merge list and a person
    search to add further profiles.

    @param req: Apache Request Object
    @type req: Apache Request Object
    @param form: GET/POST request params
    @type form: dict
    @return: a full page formatted in HTML
    @rtype: string
    '''
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
                               'primary_profile': (str, None),
                               'search_param': (str, ''),
                               'selection': (list, None),
                               'verbose': (int, 0)})

    ln = argd['ln']
    primary_cname = argd['primary_profile']
    search_param = argd['search_param']
    selection = argd['selection']
    debug = 'verbose' in argd and argd['verbose'] > 0

    webapi.session_bareinit(req)
    pinfo = session['personinfo']
    # list of [canonical_name, availability_flag] pairs kept in session
    profiles_to_merge = pinfo['merge_profiles']
    _ = gettext_set_language(ln)

    # a primary profile is mandatory — the page cannot be opened directly
    if not primary_cname:
        return page_not_authorized(req, text=_('This page is not accessible directly.'))

    no_access = self._page_access_permission_wall(req)
    if no_access:
        return no_access

    # add any newly selected profiles to the session merge list
    if selection is not None:
        profiles_to_merge_session = [cname for cname, is_available in profiles_to_merge]
        for profile in selection:
            if profile not in profiles_to_merge_session:
                pid = webapi.get_person_id_from_canonical_id(profile)
                is_available = webapi.is_profile_available(pid)
                # availability is stored as the strings '1'/'0'
                pinfo['merge_profiles'].append([profile, '1' if is_available else '0'])
        session.dirty = True

    primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
    is_available = webapi.is_profile_available(primary_pid)

    body = ''

    cname = ''
    is_owner = False
    last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
    if last_visited_pid is not None:
        cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
        is_owner = self._is_profile_owner(last_visited_pid)

    title = 'Merge Profiles'
    menu = WebProfileMenu(str(cname), "manage_profile", ln, is_owner, self._is_admin(pinfo))
    merge_page = WebProfilePage("merge_profile", title, no_cache=True)
    merge_page.add_profile_menu(menu)

    if debug:
        merge_page.add_debug_info(pinfo)

    # display status for any previously attempted merge (one-shot message)
    if pinfo['merge_info_message']:
        teaser_key, message = pinfo['merge_info_message']
        body += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
        pinfo['merge_info_message'] = None
        session.dirty = True

    body += TEMPLATE.tmpl_merge_ticket_box('person_search', 'merge_profiles', primary_cname)

    shown_element_functions = dict()
    shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_merge_profiles_search_bar(primary_cname)
    shown_element_functions['button_gen'] = TEMPLATE.merge_profiles_button_generator()
    # truthy marker: tells search_box to pass profile availability through
    shown_element_functions['pass_status'] = 'True'

    # expose the primary profile and the merge list to the client-side js
    merge_page.add_bootstrapped_data(json.dumps({
        "other": "var gMergeProfile = %s; var gMergeList = %s;" % ([primary_cname, '1' if is_available else '0'], profiles_to_merge)
    }))

    body += self.search_box(search_param, shown_element_functions)
    body = merge_page.get_wrapped_body(body)

    return page(title=title,
                metaheaderadd=merge_page.get_head().encode('utf-8'),
                body=body.encode('utf-8'),
                req=req,
                language=ln,
                show_title_p=False)
def _perform_search(self, search_param):
    '''
    Runs the person-name search for the given query and returns the
    matching person ids.

    If the query contains exactly one colon (e.g. "field:value"), only
    the part after the colon is used as the search term; any other
    shape (no colon, or several colons) is searched verbatim.  This
    preserves the original behavior, which was buried in dead
    try/except logic (str() on a str never raises).

    @param search_param: query string
    @type search_param: string
    @return: list of person ids matching the query, best match first
    @rtype: list
    '''
    if not search_param:
        return []

    if search_param.count(":") == 1:
        search_term = search_param.split(":")[1]
    else:
        search_term = search_param

    # search results are (pid, ...) tuples; only the pid is needed here
    return [result[0]
            for result in webapi.search_person_ids_by_name(search_term)]
def merge_profiles_ajax(self, req, form):
    '''
    Function used for handling Ajax requests used in order to add/remove
    profiles in/from the merging profiles list, which is saved in the
    session.

    @param req: Apache Request Object
    @type req: Apache Request Object
    @param form: Parameters sent via Ajax request
    @type form: dict
    @return: json data
    '''
    # Abort if the simplejson module isn't available
    # NOTE(review): execution continues even when JSON is unavailable —
    # confirm whether an early return was intended here.
    if not CFG_JSON_AVAILABLE:
        print "Json not configurable"

    # If it is an Ajax request, extract any JSON data.
    ajax_request = False
    # REcent papers request
    if form.has_key('jsondata'):
        json_data = json.loads(str(form['jsondata']))
        # Deunicode all strings (Invenio doesn't have unicode
        # support).
        json_data = json_unicode_to_utf8(json_data)
        ajax_request = True
        json_response = {'resultCode': 0}

    # Handle request.
    if ajax_request:
        req_type = json_data['requestType']
        if req_type == 'addProfile':
            if json_data.has_key('profile'):
                profile = json_data['profile']
                person_id = webapi.get_person_id_from_canonical_id(profile)
                if person_id != -1:
                    webapi.session_bareinit(req)
                    profiles_to_merge = session["personinfo"]["merge_profiles"]
                    profile_availability = webapi.is_profile_available(person_id)
                    # availability travels to the client as '1'/'0' strings
                    if profile_availability:
                        profile_availability = "1"
                    else:
                        profile_availability = "0"
                    if profile not in [el[0] for el in profiles_to_merge]:
                        profiles_to_merge.append([profile, profile_availability])
                        session.dirty = True
                        # TODO check access rights and get profile from db
                        json_response.update({'resultCode': 1})
                        # NOTE(review): 'addedPofile' looks like a typo for
                        # 'addedProfile', but the client-side js may depend
                        # on it — do not rename without checking consumers.
                        json_response.update({'addedPofile': profile})
                        json_response.update({'addedPofileAvailability': profile_availability})
                    else:
                        # NOTE(review): this message and the one below look
                        # swapped relative to their conditions — verify.
                        json_response.update({'result': 'Error: Profile does not exist'})
                else:
                    json_response.update({'result': 'Error: Profile was already in the list'})
            else:
                json_response.update({'result': 'Error: Missing profile'})
        elif req_type == 'removeProfile':
            if json_data.has_key('profile'):
                profile = json_data['profile']
                if webapi.get_person_id_from_canonical_id(profile) != -1:
                    webapi.session_bareinit(req)
                    profiles_to_merge = session["personinfo"]["merge_profiles"]
                    # print (str(profiles_to_merge))
                    if profile in [el[0] for el in profiles_to_merge]:
                        # iterate over a copy so removal is safe
                        for prof in list(profiles_to_merge):
                            if prof[0] == profile:
                                profiles_to_merge.remove(prof)
                        session.dirty = True
                        # TODO check access rights and get profile from db
                        json_response.update({'resultCode': 1})
                        json_response.update({'removedProfile': profile})
                    else:
                        json_response.update({'result': 'Error: Profile was missing already from the list'})
                else:
                    json_response.update({'result': 'Error: Profile does not exist'})
            else:
                json_response.update({'result': 'Error: Missing profile'})
        elif req_type == 'setPrimaryProfile':
            if json_data.has_key('profile'):
                profile = json_data['profile']
                profile_id = webapi.get_person_id_from_canonical_id(profile)
                if profile_id != -1:
                    webapi.session_bareinit(req)
                    profile_availability = webapi.is_profile_available(profile_id)
                    if profile_availability:
                        profile_availability = "1"
                    else:
                        profile_availability = "0"
                    profiles_to_merge = session["personinfo"]["merge_profiles"]
                    # if the new primary was in the merge list, take it out
                    if profile in [el[0] for el in profiles_to_merge if el and el[0]]:
                        for prof in list(profiles_to_merge):
                            if prof[0] == profile:
                                profiles_to_merge.remove(prof)
                    # demote the previous primary back into the merge list
                    primary_profile = session["personinfo"]["merge_primary_profile"]
                    if primary_profile not in profiles_to_merge:
                        profiles_to_merge.append(primary_profile)
                    session["personinfo"]["merge_primary_profile"] = [profile, profile_availability]
                    session.dirty = True
                    json_response.update({'resultCode': 1})
                    json_response.update({'primaryProfile': profile})
                    json_response.update({'primaryPofileAvailability': profile_availability})
                else:
                    json_response.update({'result': 'Error: Profile was already in the list'})
            else:
                json_response.update({'result': 'Error: Missing profile'})
        else:
            json_response.update({'result': 'Error: Wrong request type'})
    # NOTE(review): json_response is undefined when no 'jsondata' was
    # sent — this would raise NameError; confirm callers always send it.
    return json.dumps(json_response)
def search_box_ajax(self, req, form):
    '''
    Function used for handling Ajax requests used in the search box.

    @param req: Apache Request Object
    @type req: Apache Request Object
    @param form: Parameters sent via Ajax request
    @type form: dict
    @return: json data
    '''
    # Abort if the simplejson module isn't available
    # NOTE(review): execution continues even when JSON is unavailable —
    # confirm whether an early return was intended here.
    if not CFG_JSON_AVAILABLE:
        print "Json not configurable"

    # If it is an Ajax request, extract any JSON data.
    ajax_request = False
    # REcent papers request
    if form.has_key('jsondata'):
        json_data = json.loads(str(form['jsondata']))
        # Deunicode all strings (Invenio doesn't have unicode
        # support).
        json_data = json_unicode_to_utf8(json_data)
        ajax_request = True
        json_response = {'resultCode': 0}

    # Handle request.
    if ajax_request:
        req_type = json_data['requestType']
        if req_type == 'getPapers':
            if json_data.has_key('personId'):
                pId = json_data['personId']
                # flag 2 = human-confirmed papers only; -1 here means "any"
                # per the call below — TODO confirm against webapi docs.
                papers = sorted([[p[0]] for p in webapi.get_papers_by_person_id(int(pId), -1)],
                                key=itemgetter(0))
                # render only the first MAX_NUM_SHOW_PAPERS entries
                papers_html = TEMPLATE.tmpl_gen_papers(papers[0:MAX_NUM_SHOW_PAPERS])
                json_response.update({'result': "\n".join(papers_html)})
                json_response.update({'totalPapers': len(papers)})
                json_response.update({'resultCode': 1})
                json_response.update({'pid': str(pId)})
            else:
                json_response.update({'result': 'Error: Missing person id'})
        elif req_type == 'getNames':
            if json_data.has_key('personId'):
                pId = json_data['personId']
                names = webapi.get_person_names_from_id(int(pId))
                names_html = TEMPLATE.tmpl_gen_names(names)
                json_response.update({'result': "\n".join(names_html)})
                json_response.update({'resultCode': 1})
                json_response.update({'pid': str(pId)})
        elif req_type == 'getIDs':
            if json_data.has_key('personId'):
                pId = json_data['personId']
                ids = webapi.get_external_ids_from_person_id(int(pId))
                ids_html = TEMPLATE.tmpl_gen_ext_ids(ids)
                json_response.update({'result': "\n".join(ids_html)})
                json_response.update({'resultCode': 1})
                json_response.update({'pid': str(pId)})
        elif req_type == 'isProfileClaimed':
            if json_data.has_key('personId'):
                pId = json_data['personId']
                isClaimed = webapi.get_uid_from_personid(pId)
                # resultCode stays 0 when the profile is unclaimed
                if isClaimed != -1:
                    json_response.update({'resultCode': 1})
                    json_response.update({'pid': str(pId)})
        else:
            json_response.update({'result': 'Error: Wrong request type'})
    # NOTE(review): json_response is undefined when no 'jsondata' was
    # sent — this would raise NameError; confirm callers always send it.
    return json.dumps(json_response)
def choose_profile(self, req, form):
    '''
    Generate SSO landing/choose_profile page.

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params
    @type form: dict
    '''
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
                               'search_param': (str, None),
                               'failed': (str, None),
                               'verbose': (int, 0)})

    ln = argd['ln']
    debug = "verbose" in argd and argd["verbose"] > 0
    req.argd = argd  # needed for perform_req_search

    search_param = argd['search_param']
    webapi.session_bareinit(req)
    uid = getUid(req)
    pinfo = session['personinfo']

    # 'failed' marks a previously unsuccessful profile-claim attempt
    failed = True
    if not argd['failed']:
        failed = False

    _ = gettext_set_language(ln)

    # page is INSPIRE-only and requires an arXiv SSO login
    if not CFG_INSPIRE_SITE:
        return page_not_authorized(req, text=_("This page is not accessible directly."))

    params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
    login_info = webapi.get_login_info(uid, params)

    if 'arXiv' not in login_info['logged_in_to_remote_systems']:
        return page_not_authorized(req, text=_("This page is not accessible directly."))

    pid = webapi.get_user_pid(login_info['uid'])

    # Create Wrapper Page Markup
    is_owner = False
    menu = WebProfileMenu('', "choose_profile", ln, is_owner, self._is_admin(pinfo))
    choose_page = WebProfilePage("choose_profile", "Choose your profile", no_cache=True)
    choose_page.add_profile_menu(menu)

    if debug:
        choose_page.add_debug_info(pinfo)

    content = TEMPLATE.tmpl_choose_profile(failed)
    body = choose_page.get_wrapped_body(content)

    # In any case, when we step by here, an autoclaim should be
    # performed right after!
    pinfo = session["personinfo"]
    pinfo['should_check_to_autoclaim'] = True
    session.dirty = True

    last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])

    # if already logged in then redirect the user to the page he was viewing
    if pid != -1:
        redirect_pid = pid
        if last_visited_pid:
            redirect_pid = last_visited_pid
        redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, str(redirect_pid)))
    else:
        # get name strings and email addresses from SSO/Oauth logins:
        # {'system':{'name':[variant1,...,variantn], 'email':'blabla@bla.bla', 'pants_size':20}}
        remote_login_systems_info = webapi.get_remote_login_systems_info(req, login_info['logged_in_to_remote_systems'])
        # get union of recids that are associated to the ids from all the
        # external systems: set(inspire_recids_list)
        recids = webapi.get_remote_login_systems_recids(req, login_info['logged_in_to_remote_systems'])
        # this is the profile with the biggest intersection of papers so it's
        # more probable that this is the profile the user seeks
        probable_pid = webapi.match_profile(req, recids, remote_login_systems_info)

        # if not search_param and probable_pid > -1 and probable_pid == last_visited_pid:
        #     # try to assign the user to the profile he chose. If for some
        #     # reason the profile is not available we assign him to an empty profile
        #     redirect_pid, profile_claimed = webapi.claim_profile(login_info['uid'], probable_pid)
        #     if profile_claimed:
        #         redirect_to_url(req, '%s/author/claim/action?associate_profile=True&redirect_pid=%s' % (CFG_SITE_URL, str(redirect_pid)))

        probable_profile_suggestion_info = None
        last_viewed_profile_suggestion_info = None

        if last_visited_pid > -1 and webapi.is_profile_available(last_visited_pid):
            # get information about the most probable profile and show it to the user
            last_viewed_profile_suggestion_info = webapi.get_profile_suggestion_info(req, last_visited_pid, recids)
        if probable_pid > -1 and webapi.is_profile_available(probable_pid):
            # get information about the most probable profile and show it to the user
            probable_profile_suggestion_info = webapi.get_profile_suggestion_info(req, probable_pid, recids )

        if not search_param:
            # we prefil the search with most relevant among the names that
            # we get from external systems
            name_variants = webapi.get_name_variants_list_from_remote_systems_names(remote_login_systems_info)
            search_param = most_relevant_name(name_variants)

        body = body + TEMPLATE.tmpl_probable_profile_suggestion(probable_profile_suggestion_info, last_viewed_profile_suggestion_info, search_param)

        shown_element_functions = dict()
        shown_element_functions['button_gen'] = TEMPLATE.tmpl_choose_profile_search_button_generator()
        shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_choose_profile_search_new_person_generator()
        shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_choose_profile_search_bar()
        # show in the templates the column status (if profile is bound to a user or not)
        shown_element_functions['show_status'] = True
        # pass in the templates the data of the column status (if profile is
        # bound to a user or not); we might need the data without having to
        # show them in the column (cf. merge_profiles)
        shown_element_functions['pass_status'] = True
        # show search results to the user
        body = body + self.search_box(search_param, shown_element_functions)
        body = body + TEMPLATE.tmpl_choose_profile_footer()

        title = _(' ')
        return page(title=title,
                    metaheaderadd=choose_page.get_head().encode('utf-8'),
                    body=body,
                    req=req,
                    language=ln)
@staticmethod
def _arxiv_box(req, login_info, person_id, user_pid):
    '''
    Collects the data needed to build the arXiv box.

    @param req: Apache request object
    @type req: Apache request object
    @param login_info: login status, e.g. {'logged_in': True, 'uid': 2,
        'logged_in_to_remote_systems': ['Arxiv', ...]}
    @type login_info: dict
    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the user
    @type user_pid: int
    @return: data required to build the arXiv box
    @rtype: dict
    '''
    # The original fetched session['personinfo'] into an unused local
    # and assigned 'view_own_profile' twice (with swapped operands);
    # a single assignment is equivalent.
    arxiv_data = dict()
    arxiv_data['login'] = login_info['logged_in']
    arxiv_data['user_pid'] = user_pid
    arxiv_data['user_has_pid'] = user_pid != -1
    # True when the profile being viewed belongs to the logged-in user.
    arxiv_data['view_own_profile'] = user_pid == person_id
    return arxiv_data
@staticmethod
def _orcid_box(arxiv_logged_in, person_id, user_pid, ulevel):
    '''
    Collects the data needed to build the ORCID box.

    @param arxiv_logged_in: whether the user is logged in through arXiv
    @type arxiv_logged_in: boolean
    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the user
    @type user_pid: int
    @param ulevel: user's level
    @type ulevel: string
    @return: data required to build the orcid box
    @rtype: dict
    '''
    known_orcids = webapi.get_orcids_by_pid(person_id)
    return {
        'arxiv_login': arxiv_logged_in,
        # None when the profile has no ORCIDs attached
        'orcids': known_orcids or None,
        # only admins may attach an existing ORCID to a profile
        'add_power': ulevel == "admin",
        # True when the viewed profile belongs to the logged-in user
        'own_profile': person_id == user_pid,
        'pid': person_id,
    }
@staticmethod
def _autoclaim_papers_box(req, person_id, user_pid, remote_logged_in_systems):
    '''
    Collects the data needed to build the autoclaim box.

    @param req: Apache request object
    @type req: Apache request object
    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the user
    @type user_pid: int
    @param remote_logged_in_systems: the remote logged in systems
    @type remote_logged_in_systems: list
    @return: data required to build the autoclaim box
    @rtype: dict
    '''
    autoclaim_data = {'hidden': True, 'person_id': person_id}
    # The box is only shown to a user viewing his own profile; for
    # anyone else it stays hidden and no claim counting happens.
    if person_id == user_pid:
        candidate_recids = webapi.get_remote_login_systems_recids(
            req, remote_logged_in_systems)
        autoclaim_data['hidden'] = False
        autoclaim_data['num_of_claims'] = len(candidate_recids)
    return autoclaim_data
############################################
# New autoclaim functions #
############################################
def generate_autoclaim_data(self, req, form):
    '''
    Ajax endpoint that autoclaims, for the given person, the papers
    found in the remote systems the user is logged in to, and returns
    the rendered autoclaim box as json (written to req).

    @param req: Apache Request Object
    @type req: Apache Request Object
    @param form: Ajax request parameters; must carry 'jsondata' with a
        'personId' field
    @type form: dict
    @return: json data (cached-result path); otherwise written to req
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"

    # Fail if no json data exists in the Ajax request
    if not form.has_key('jsondata'):
        return self._fail(req, apache.HTTP_NOT_FOUND)

    json_data = json.loads(str(form['jsondata']))
    json_data = json_unicode_to_utf8(json_data)

    try:
        pid = int(json_data['personId'])
    except (KeyError, ValueError, TypeError):
        # narrowed from a bare except: only parameter problems end here
        raise NotImplementedError("Some error with the parameter from the Ajax request occured.")

    webapi.session_bareinit(req)
    pinfo = session['personinfo']

    # If autoclaim was done already and no new remote systems exist
    # in order to autoclaim new papers send the cached result
    if not pinfo['orcid']['import_pubs'] and pinfo['autoclaim']['res'] is not None:
        autoclaim_data = pinfo['autoclaim']['res']
        json_response = {'resultCode': 1,
                         'result': TEMPLATE.tmpl_autoclaim_box(autoclaim_data,
                                                               CFG_SITE_LANG,
                                                               add_box=False,
                                                               loading=False)}
        return json.dumps(json_response)

    external_pubs_association = pinfo['autoclaim']['external_pubs_association']
    autoclaim_ticket = pinfo['autoclaim']['ticket']
    ulevel = pinfo['ulevel']
    uid = getUid(req)

    params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
    login_status = webapi.get_login_info(uid, params)
    remote_systems = login_status['logged_in_to_remote_systems']

    papers_to_autoclaim = set(webapi.get_papers_from_remote_systems(remote_systems, params, external_pubs_association))
    # Fetch the already-claimed papers once (the original queried them
    # twice) and keep only the not-yet-claimed ones for the ticket.
    claimed_recids = set(rec for _, _, rec in get_claimed_papers_of_author(pid))
    already_claimed_recids = claimed_recids & papers_to_autoclaim
    papers_to_autoclaim = papers_to_autoclaim - claimed_recids

    for paper in papers_to_autoclaim:
        operation_parts = {'pid': pid,
                           'action': 'assign',
                           'bibrefrec': str(paper)}
        operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
        if operation_to_be_added is None:
            # In case the operation could not be created (because of an
            # erroneous bibrefrec) ignore it and continue with the rest
            continue
        webapi.add_operation_to_ticket(operation_to_be_added, autoclaim_ticket)

    additional_info = {'first_name': '', 'last_name': '', 'email': '',
                       'comments': 'Assigned automatically when autoclaim was triggered.'}
    userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=False)
    webapi.commit_operations_from_ticket(autoclaim_ticket, userinfo, uid, ulevel)

    autoclaim_data = dict()
    autoclaim_data['hidden'] = False
    autoclaim_data['person_id'] = pid
    # executed operations plus papers that were claimed beforehand
    autoclaim_data['successfull_recids'] = set([op['rec'] for op in webapi.get_ticket_status(autoclaim_ticket) if 'execution_result' in op]) | already_claimed_recids

    webapi.clean_ticket(autoclaim_ticket)
    # whatever survived the cleaning could not be claimed
    autoclaim_data['unsuccessfull_recids'] = [op['rec'] for op in webapi.get_ticket_status(autoclaim_ticket)]
    autoclaim_data['num_of_unsuccessfull_recids'] = len(autoclaim_data['unsuccessfull_recids'])

    autoclaim_data['recids_to_external_ids'] = dict()
    for rec in external_pubs_association.itervalues():
        autoclaim_data['recids_to_external_ids'][rec] = get_title_of_paper(rec)

    # cache the result in the session
    pinfo['autoclaim']['res'] = autoclaim_data
    if pinfo['orcid']['import_pubs']:
        pinfo['orcid']['import_pubs'] = False
    session.dirty = True

    json_response = {'resultCode': 1,
                     'result': TEMPLATE.tmpl_autoclaim_box(autoclaim_data,
                                                           CFG_SITE_LANG,
                                                           add_box=False,
                                                           loading=False)}
    req.write(json.dumps(json_response))
@staticmethod
def get_params_to_check_login_info(session):
    '''
    Gathers, per remote system, the session parameters needed to check
    the user's login status there.

    @param session: the current session object
    @return: mapping of remote system name to its login-check params
    @rtype: dict
    '''
    def get_params_to_check_login_info_of_arxiv(session):
        # arXiv login state lives in the generic user_info dict.
        try:
            return session['user_info']
        except KeyError:
            return None

    def get_params_to_check_login_info_of_orcid(session):
        pinfo = session['personinfo']
        # Best effort: any failure (missing pid, no orcid record, ...)
        # simply means no stored orcid id can be relied upon.
        # (Narrowed from a bare except: so e.g. KeyboardInterrupt
        # is no longer swallowed.)
        try:
            pinfo['orcid']['has_orcid_id'] = bool(
                get_orcid_id_of_author(pinfo['pid'])[0][0] and
                pinfo['orcid']['import_pubs'])
        except Exception:
            pinfo['orcid']['has_orcid_id'] = False
        session.dirty = True
        return pinfo['orcid']

    get_params_for_remote_system = {'arXiv': get_params_to_check_login_info_of_arxiv,
                                    'orcid': get_params_to_check_login_info_of_orcid}

    params = dict()
    for system, get_params in get_params_for_remote_system.iteritems():
        params[system] = get_params(session)
    return params
@staticmethod
def _claim_paper_box(person_id):
    '''
    Collects the data needed to build the claim-paper box.

    @param person_id: person id of the current page's profile
    @type person_id: int
    @return: data required to build the claim paper box
    @rtype: dict
    '''
    canonical = webapi.get_canonical_id_from_person_id(person_id)
    return {'canonical_id': str(canonical)}
@staticmethod
def _support_box():
    '''
    Collects the data needed to build the support box (currently none).

    @return: data required to build the support box
    @rtype: dict
    '''
    return {}
@staticmethod
def _merge_box(person_id):
    '''
    Collects the data needed to build the merge box.

    @param person_id: person id of the current page's profile
    @type person_id: int
    @return: data required to build the merge box
    @rtype: dict
    '''
    canonical_id = webapi.get_canonical_id_from_person_id(person_id)
    name_variants = [variant[0]
                     for variant in webapi.get_person_names_from_id(person_id)]
    relevant_name = most_relevant_name(name_variants)

    # Prefill the search with the surname of the most relevant name
    # variant; fall back to the canonical id when no name is known.
    search_param = canonical_id
    if relevant_name:
        search_param = relevant_name.split(",")[0]

    return {'search_param': search_param,
            'canonical_id': canonical_id}
@staticmethod
def _internal_ids_box(person_id, user_pid, ulevel):
    '''
    Collects the data needed to build the internal ids box.

    (The original docstring and local variable were copy-pasted from
    _external_ids_box: it claimed to build the "external_ids box" and
    documented a nonexistent parameter.)

    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the user
    @type user_pid: int
    @param ulevel: user's level
    @type ulevel: string
    @return: data required to build the internal ids box
    @rtype: dict
    '''
    internal_ids_data = dict()
    # current and any previous internal user ids bound to the profile
    internal_ids_data['uid'], internal_ids_data['old_uids'] = webapi.get_internal_user_id_from_person_id(person_id)
    internal_ids_data['person_id'] = person_id
    internal_ids_data['user_pid'] = user_pid
    internal_ids_data['ulevel'] = ulevel
    return internal_ids_data
@staticmethod
def _external_ids_box(person_id, user_pid, ulevel):
    '''
    Collects the data needed to build the external ids box.

    (The original local variable was named internal_ids_data — swapped
    with _internal_ids_box — and the docstring documented a nonexistent
    parameter.)

    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the user
    @type user_pid: int
    @param ulevel: user's level
    @type ulevel: string
    @return: data required to build the external ids box
    @rtype: dict
    '''
    external_ids_data = dict()
    external_ids_data['ext_ids'] = webapi.get_external_ids_from_person_id(person_id)
    external_ids_data['person_id'] = person_id
    external_ids_data['user_pid'] = user_pid
    external_ids_data['ulevel'] = ulevel
    return external_ids_data
@staticmethod
def _hepnames_box(person_id):
    '''
    Return the HepNames data for the given person id (delegates
    entirely to webapi.get_hepnames).
    '''
    return webapi.get_hepnames(person_id)
def tickets_admin(self, req, form):
    '''
    Generates the admin page listing all persons with open RT tickets.

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params
    @type form: dict
    @return: a full page formatted in HTML
    @rtype: string
    '''
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
    ln = argd['ln']

    webapi.session_bareinit(req)
    no_access = self._page_access_permission_wall(req, req_level='admin')
    if no_access:
        return no_access

    pinfo = session['personinfo']

    cname = ''
    is_owner = False
    last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
    if last_visited_pid is not None:
        cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
        is_owner = self._is_profile_owner(last_visited_pid)

    menu = WebProfileMenu(str(cname), "open_tickets", ln, is_owner, self._is_admin(pinfo))

    title = "Open RT tickets"
    profile_page = WebProfilePage("help", title, no_cache=True)
    profile_page.add_profile_menu(menu)

    # Present each open-ticket entry as [most frequent name, redirect
    # link, t[0] (the pid), t[1]] for the template.  (The original did
    # this via remove()/append() on the list while looping over a copy
    # of it — an O(n^2) in-place map; a comprehension is equivalent.)
    tickets = [[webapi.get_most_frequent_name_from_pid(int(t[0])),
                webapi.get_person_redirect_link(t[0]), t[0], t[1]]
               for t in webapi.get_persons_with_open_tickets_list()]

    content = TEMPLATE.tmpl_tickets_admin(tickets)
    content = TEMPLATE.tmpl_person_detail_layout(content)

    body = profile_page.get_wrapped_body(content)

    return page(title=title,
                metaheaderadd=profile_page.get_head().encode('utf-8'),
                body=body.encode('utf-8'),
                req=req,
                language=ln,
                show_title_p=False)
def help(self, req, form):
    """Render the static help page inside the author-profile layout."""
    washed = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
    lang = washed['ln']
    _ = gettext_set_language(lang)

    # The help page only exists on the INSPIRE installation.
    if not CFG_INSPIRE_SITE:
        return page_not_authorized(req, text=_("This page is not accessible directly."))

    webapi.session_bareinit(req)
    pinfo = session['personinfo']

    canonical_name = ''
    owns_profile = False
    last_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
    if last_pid is not None:
        canonical_name = webapi.get_canonical_id_from_person_id(last_pid)
        owns_profile = self._is_profile_owner(last_pid)

    menu = WebProfileMenu(str(canonical_name), "help", lang, owns_profile,
                          self._is_admin(pinfo))

    title = "Help page"
    page_markup = WebProfilePage("help", title, no_cache=True)
    page_markup.add_profile_menu(menu)

    wrapped = page_markup.get_wrapped_body(TEMPLATE.tmpl_help_page())

    return page(title=title,
                metaheaderadd=page_markup.get_head().encode('utf-8'),
                body=wrapped.encode('utf-8'),
                req=req,
                language=lang,
                show_title_p=False)
def export(self, req, form):
    '''
    Generate JSONized export of Person data.

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params; understood keys are 'ln',
        'request' (export filter name) and 'userid'
    @type form: dict

    @return: JSON payload, or a short error token string on failure
    @rtype: str
    '''
    argd = wash_urlargd(
        form,
        {'ln': (str, CFG_SITE_LANG),
         'request': (str, None),
         'userid': (str, None)})

    if not CFG_JSON_AVAILABLE:
        return "500_json_not_found__install_package"

    # wash_urlargd guarantees both keys exist; only values can be empty.
    userid = argd['userid']
    if not userid:
        return "404_user_not_found"
    request = argd['request']

    # find user from ID
    user_email = get_email_from_username(userid)
    if user_email == userid:
        # get_email_from_username echoes its argument when no user matches
        return "404_user_not_found"

    uid = get_uid_from_email(user_email)
    uinfo = collect_user_info(uid)
    # find person by uid
    pid = webapi.get_pid_from_uid(uid)
    # find papers by pid that are confirmed through a human.
    papers = webapi.get_papers_by_person_id(pid, 2)

    # filter by request param, e.g. arxiv
    if not request:
        return "404__no_filter_selected"

    if request not in VALID_EXPORT_FILTERS:
        return "500_filter_invalid"

    # Only the 'arxiv' filter is implemented; other valid filters
    # currently fall through and return None implicitly.
    if request == "arxiv":
        query = "(recid:"
        query += " OR recid:".join(papers)
        query += ") AND 037:arxiv"
        db_docs = perform_request_search(p=query, rg=0)

        # Pick a display name: prefer the account nickname, then the
        # account email, then the resolved login email.
        nickmail = ""
        nickname = ""
        try:
            nickname = uinfo["nickname"]
        except KeyError:
            pass
        if not nickname:
            try:
                nickmail = uinfo["email"]
            except KeyError:
                nickmail = user_email
            nickname = nickmail

        db_arxiv_ids = get_fieldvalues(db_docs, "037__a")

        construct = {"nickname": nickname,
                     "claims": ";".join(db_arxiv_ids)}
        jsondmp = json.dumps(construct)
        # Sign the payload *without* the digest, then attach the digest.
        signature = webapi.sign_assertion("arXiv", jsondmp)
        construct["digest"] = signature

        return json.dumps(construct)
index = __call__
class WebInterfaceBibAuthorIDManageProfilePages(WebInterfaceDirectory):

    """Web interface for the author profile management pages
    (/author/manage_profile/*)."""

    _exports = ['',
                'import_orcid_pubs',
                'connect_author_with_hepname',
                'connect_author_with_hepname_ajax',
                'suggest_orcid',
                'suggest_orcid_ajax']

    def _lookup(self, component, path):
        '''
        This handler parses dynamic URLs:
        - /author/manage_profile/1332 shows the page of author with id: 1332
        - /author/manage_profile/100:5522,1431 shows the page of the author
          identified by the bibrefrec: '100:5522,1431'
        '''
        if component not in self._exports:
            return WebInterfaceBibAuthorIDManageProfilePages(component), path

    def _is_profile_owner(self, pid):
        # True when the profile being managed belongs to the given author.
        return self.person_id == int(pid)

    def _is_admin(self, pinfo):
        # True when the session user has admin rights.
        return pinfo['ulevel'] == 'admin'

    def __init__(self, identifier=None):
        '''
        Constructor of the web interface.

        @param identifier: identifier of an author. Can be one of:
            - an author id: e.g. "14"
            - a canonical id: e.g. "J.R.Ellis.1"
            - a bibrefrec: e.g. "100:1442,155"
        @type identifier: str
        '''
        self.person_id = -1   # -1 is a non valid author identifier

        if identifier is None or not isinstance(identifier, str):
            return

        self.original_identifier = identifier

        # Numeric identifiers are taken as-is; everything else is first
        # tried as a canonical id, e.g. "J.R.Ellis.1".  NOTE(review):
        # a plain numeric id is accepted here without the
        # author_has_papers check below -- presumably intended.
        try:
            pid = int(identifier)
        except ValueError:
            pid = int(webapi.get_person_id_from_canonical_id(identifier))
        if pid >= 0:
            self.person_id = pid
            return

        # check if it's an author id: e.g. "14"
        try:
            pid = int(identifier)
            if webapi.author_has_papers(pid):
                self.person_id = pid
                return
        except ValueError:
            pass

        # check if it's a bibrefrec: e.g. "100:1442,155"
        if webapi.is_valid_bibref(identifier):
            pid = int(webapi.get_person_id_from_paper(identifier))
            if pid >= 0:
                self.person_id = pid
                return

    def __call__(self, req, form):
        '''
        Generate SSO landing/author management page.

        @param req: Apache request object
        @type req: Apache request object
        @param form: GET/POST request params
        @type form: dict
        '''
        webapi.session_bareinit(req)
        pinfo = session['personinfo']
        ulevel = pinfo['ulevel']
        person_id = self.person_id
        uid = getUid(req)
        pinfo['claim_in_process'] = True

        argd = wash_urlargd(form, {
            'ln': (str, CFG_SITE_LANG),
            'verbose': (int, 0)})

        # wash_urlargd guarantees the 'verbose' key exists.
        debug = argd['verbose'] > 0
        ln = argd['ln']
        _ = gettext_set_language(ln)

        if not CFG_INSPIRE_SITE or self.person_id is None:
            return page_not_authorized(req, text=_("This page is not accessible directly."))

        if person_id < 0:
            return self._error_page(req, message=("Identifier %s is not a valid person identifier or does not exist anymore!" % self.original_identifier))

        # log the visit
        webapi.history_log_visit(req, 'manage_profile', pid=person_id)

        # store the arxiv papers the user owns
        if uid > 0 and not pinfo['arxiv_status']:
            uinfo = collect_user_info(req)
            arxiv_papers = list()
            if 'external_arxivids' in uinfo and uinfo['external_arxivids']:
                arxiv_papers = uinfo['external_arxivids'].split(';')
            if arxiv_papers:
                webapi.add_arxiv_papers_to_author(arxiv_papers, person_id)
            pinfo['arxiv_status'] = True

        params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
        login_info = webapi.get_login_info(uid, params)

        title_message = _('Profile management')

        # Create Wrapper Page Markup
        cname = webapi.get_canonical_id_from_person_id(self.person_id)
        if cname == self.person_id:
            # No canonical id could be resolved for this person.
            return page_not_authorized(req, text=_("This page is not accessible directly."))
        menu = WebProfileMenu(cname, "manage_profile", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))

        profile_page = WebProfilePage("manage_profile", webapi.get_longest_name_from_pid(self.person_id), no_cache=True)
        profile_page.add_profile_menu(menu)

        # Globals consumed by the client-side ticket-box JavaScript.
        gboxstatus = self.person_id
        gpid = self.person_id
        gNumOfWorkers = 3   # to do: read it from conf file
        gReqTimeout = 3000
        gPageTimeout = 12000

        profile_page.add_bootstrapped_data(json.dumps({
            "other": "var gBOX_STATUS = '%s';var gPID = '%s'; var gNumOfWorkers= '%s'; var gReqTimeout= '%s'; var gPageTimeout= '%s';" % (gboxstatus, gpid, gNumOfWorkers, gReqTimeout, gPageTimeout),
            "backbone": """
            (function(ticketbox) {
                var app = ticketbox.app;
                app.userops.set(%s);
                app.bodyModel.set({userLevel: "%s"});
            })(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
        }))

        if debug:
            profile_page.add_debug_info(pinfo)

        user_pid = webapi.get_user_pid(login_info['uid'])
        person_data = webapi.get_person_info_by_pid(person_id)

        # process and collect data for every box [LEGACY]
        arxiv_data = WebInterfaceBibAuthorIDClaimPages._arxiv_box(req, login_info, person_id, user_pid)
        orcid_data = WebInterfaceBibAuthorIDClaimPages._orcid_box(arxiv_data['login'], person_id, user_pid, ulevel)
        claim_paper_data = WebInterfaceBibAuthorIDClaimPages._claim_paper_box(person_id)
        support_data = WebInterfaceBibAuthorIDClaimPages._support_box()

        # The id boxes are admin-only.
        ext_ids_data = None
        int_ids_data = None
        if ulevel == 'admin':
            ext_ids_data = WebInterfaceBibAuthorIDClaimPages._external_ids_box(person_id, user_pid, ulevel)
            int_ids_data = WebInterfaceBibAuthorIDClaimPages._internal_ids_box(person_id, user_pid, ulevel)

        autoclaim_data = WebInterfaceBibAuthorIDClaimPages._autoclaim_papers_box(req, person_id, user_pid, login_info['logged_in_to_remote_systems'])
        merge_data = WebInterfaceBibAuthorIDClaimPages._merge_box(person_id)
        hepnames_data = WebInterfaceBibAuthorIDClaimPages._hepnames_box(person_id)

        content = ''
        # display status for any previously attempted merge
        if pinfo['merge_info_message']:
            teaser_key, message = pinfo['merge_info_message']
            content += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
            pinfo['merge_info_message'] = None
            session.dirty = True

        content += TEMPLATE.tmpl_profile_management(ln, person_data, arxiv_data,
                                                    orcid_data, claim_paper_data,
                                                    int_ids_data, ext_ids_data,
                                                    autoclaim_data, support_data,
                                                    merge_data, hepnames_data)
        body = profile_page.get_wrapped_body(content)

        return page(title=title_message,
                    metaheaderadd=profile_page.get_head().encode('utf-8'),
                    body=body.encode('utf-8'),
                    req=req,
                    language=ln,
                    show_title_p=False)

    def import_orcid_pubs(self, req, form):
        """Import the DOIs of the author's ORCID works, then redirect back
        to the manage-profile page."""
        webapi.session_bareinit(req)
        pinfo = session['personinfo']
        orcid_info = pinfo['orcid']

        # author should have already an orcid if this method was triggered
        orcid_id = get_orcid_id_of_author(pinfo['pid'])[0][0]
        orcid_dois = get_dois_from_orcid(orcid_id)

        # TODO: what to do in case some ORCID server error occurs?
        if orcid_dois is None:
            redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))

        # TODO: it would be smarter if:
        # 1. we save in the db the orcid_dois
        # 2. to expire only the external pubs box in the profile page
        webauthorapi.expire_all_cache_for_personid(pinfo['pid'])

        orcid_info['imported_pubs'] = orcid_dois
        orcid_info['import_pubs'] = True
        session.dirty = True

        redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))

    def connect_author_with_hepname(self, req, form):
        """Associate a HepNames record with an author, then redirect back
        to the page the user came from."""
        argd = wash_urlargd(form, {'cname': (str, None),
                                   'hepname': (str, None),
                                   'ln': (str, CFG_SITE_LANG)})
        ln = argd['ln']
        if argd['cname'] is not None:
            cname = argd['cname']
        else:
            return self._error_page(req, ln, "Fatal: cannot associate a hepname without a person id.")
        if argd['hepname'] is not None:
            hepname = argd['hepname']
        else:
            return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid hepname.")

        webapi.connect_author_with_hepname(cname, hepname)
        webapi.session_bareinit(req)
        pinfo = session['personinfo']
        last_visited_page = webapi.history_get_last_visited_url(pinfo['visit_diary'], just_page=True)
        redirect_to_url(req, "%s/author/%s/%s" % (CFG_SITE_URL, last_visited_page, cname))

    def connect_author_with_hepname_ajax(self, req, form):
        '''
        Function used for handling Ajax requests.

        @param req: apache request object
        @type req: apache request object
        @param form: parameters sent via Ajax request
        @type form: dict
        '''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"
        # Fail if no json data exists in the Ajax request
        if not form.has_key('jsondata'):
            return self._fail(req, apache.HTTP_NOT_FOUND)

        json_data = json.loads(str(form['jsondata']))
        json_data = json_unicode_to_utf8(json_data)
        try:
            cname = json_data['cname']
            hepname = json_data['hepname']
        except (KeyError, TypeError):
            # malformed payload: missing keys or not a dict
            return self._fail(req, apache.HTTP_NOT_FOUND)

        # NOTE(review): session is read without a prior
        # webapi.session_bareinit(req), unlike the sibling handlers --
        # confirm the session is guaranteed to be initialised here.
        pinfo = session['personinfo']
        # Non-admins go through the RT-ticket path; admins write the
        # cname onto the HepNames record directly.
        if not self._is_admin(pinfo):
            webapi.connect_author_with_hepname(cname, hepname)
        else:
            uid = getUid(req)
            add_cname_to_hepname_record(cname, hepname, uid)

    def suggest_orcid(self, req, form):
        """Associate an ORCID with an author, then redirect to the
        manage-profile page."""
        argd = wash_urlargd(form, {'orcid': (str, None),
                                   'pid': (int, -1),
                                   'ln': (str, CFG_SITE_LANG)})
        ln = argd['ln']
        if argd['pid'] > -1:
            pid = argd['pid']
        else:
            return self._error_page(req, ln, "Fatal: cannot associate an orcid without a person id.")

        if argd['orcid'] is not None and is_valid_orcid(argd['orcid']):
            orcid = argd['orcid']
        else:
            return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid ORCiD.")

        webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid)

        redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, pid))

    def suggest_orcid_ajax(self, req, form):
        '''
        Function used for handling Ajax requests.

        @param req: apache request object
        @type req: apache request object
        @param form: parameters sent via Ajax request
        @type form: dict
        '''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"
        # Fail if no json data exists in the Ajax request
        if not form.has_key('jsondata'):
            return self._fail(req, apache.HTTP_NOT_FOUND)

        json_data = json.loads(str(form['jsondata']))
        json_data = json_unicode_to_utf8(json_data)
        try:
            orcid = json_data['orcid']
            pid = json_data['pid']
        except (KeyError, TypeError):
            # malformed payload: missing keys or not a dict
            return self._fail(req, apache.HTTP_NOT_FOUND)

        if not is_valid_orcid(orcid):
            return self._fail(req, apache.HTTP_NOT_FOUND)

        webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid)

    def _fail(self, req, code):
        # Set the HTTP status code and return an empty body.
        req.status = code
        return

    def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
        '''
        Create a page that contains a message explaining the error.

        @param req: Apache Request Object
        @type req: Apache Request Object
        @param ln: language
        @type ln: string
        @param message: message to be displayed
        @type message: string
        @param intro: prepend a generic apology paragraph
        @type intro: bool
        '''
        body = []

        _ = gettext_set_language(ln)

        if not message:
            message = "No further explanation available. Sorry."

        if intro:
            body.append(_("<p>We're sorry. An error occurred while "
                          "handling your request. Please find more information "
                          "below:</p>"))
        body.append("<p><strong>%s</strong></p>" % message)

        return page(title=_("Notice"),
                    body="\n".join(body),
                    description="%s - Internal Error" % CFG_SITE_NAME,
                    keywords="%s, Internal Error" % CFG_SITE_NAME,
                    language=ln,
                    req=req)

    # Serve the directory root ('' in _exports) with __call__.
    index = __call__
class WebInterfaceAuthorTicketHandling(WebInterfaceDirectory):

    """AJAX endpoints for manipulating claim tickets (/author/ticket/*).

    Every handler expects a POST parameter 'jsondata' whose JSON payload
    carries an 'on' key ('user' or 'autoclaim') selecting which ticket
    stored in the session the operation applies to.
    """

    _exports = ['get_status',
                'update_status',
                'add_operation',
                'modify_operation',
                'remove_operation',
                'commit',
                'abort']

    @staticmethod
    def bootstrap_status(pinfo, on_ticket):
        '''
        Function used for generating get_status json bootstrapping.

        @param pinfo: person_info
        @type pinfo: dict
        @param on_ticket: ticket target
        @type on_ticket: str

        @return: JSON-encoded ticket status ('{}' when no ticket exists)
        @rtype: str
        '''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"

        author_ticketing = WebInterfaceAuthorTicketHandling()

        ticket = author_ticketing._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return "{}"

        ticket_status = webapi.get_ticket_status(ticket)

        return json.dumps(ticket_status)

    def get_status(self, req, form):
        '''
        Write the status of the selected ticket to the response as JSON.

        @param req: apache request object
        @type req: apache request object
        @param form: parameters sent via Ajax request
        @type form: dict
        '''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"
        # Fail if no json data exists in the Ajax request
        if not form.has_key('jsondata'):
            return self._fail(req, apache.HTTP_NOT_FOUND)

        json_data = json.loads(str(form['jsondata']))
        json_data = json_unicode_to_utf8(json_data)
        try:
            on_ticket = json_data['on']
        except (KeyError, TypeError):
            # malformed payload: missing 'on' key or not a dict
            return self._fail(req, apache.HTTP_NOT_FOUND)

        webapi.session_bareinit(req)
        pinfo = session['personinfo']

        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)

        ticket_status = webapi.get_ticket_status(ticket)

        session.dirty = True

        req.content_type = 'application/json'
        req.write(json.dumps(ticket_status))

    def update_status(self, req, form):
        '''
        Refresh the stored status of the selected ticket.

        @param req: apache request object
        @type req: apache request object
        @param form: parameters sent via Ajax request
        @type form: dict
        '''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"
        # Fail if no json data exists in the Ajax request
        if not form.has_key('jsondata'):
            return self._fail(req, apache.HTTP_NOT_FOUND)

        json_data = json.loads(str(form['jsondata']))
        json_data = json_unicode_to_utf8(json_data)
        try:
            on_ticket = json_data['on']
        except (KeyError, TypeError):
            # malformed payload: missing 'on' key or not a dict
            return self._fail(req, apache.HTTP_NOT_FOUND)

        webapi.session_bareinit(req)
        pinfo = session['personinfo']

        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)

        webapi.update_ticket_status(ticket)

        session.dirty = True

    def add_operation(self, req, form):
        '''
        Add a claim/disclaim operation to the selected ticket.

        @param req: apache request object
        @type req: apache request object
        @param form: parameters sent via Ajax request
        @type form: dict
        '''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"
        # Fail if no json data exists in the Ajax request
        if not form.has_key('jsondata'):
            return self._fail(req, apache.HTTP_NOT_FOUND)

        json_data = json.loads(str(form['jsondata']))
        json_data = json_unicode_to_utf8(json_data)
        try:
            operation_parts = {'pid': int(json_data['pid']),
                               'action': json_data['action'],
                               'bibrefrec': json_data['bibrefrec']}
            on_ticket = json_data['on']
        except (KeyError, TypeError, ValueError):
            # malformed payload: missing keys, not a dict, or non-int pid
            return self._fail(req, apache.HTTP_NOT_FOUND)

        webapi.session_bareinit(req)
        pinfo = session['personinfo']
        uid = getUid(req)

        operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
        if operation_to_be_added is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)

        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)

        webapi.add_operation_to_ticket(operation_to_be_added, ticket)

        session.dirty = True

    def modify_operation(self, req, form):
        '''
        Modify an existing operation of the selected ticket.

        @param req: apache request object
        @type req: apache request object
        @param form: parameters sent via Ajax request
        @type form: dict
        '''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"
        # Fail if no json data exists in the Ajax request
        if not form.has_key('jsondata'):
            return self._fail(req, apache.HTTP_NOT_FOUND)

        json_data = json.loads(str(form['jsondata']))
        json_data = json_unicode_to_utf8(json_data)
        try:
            operation_parts = {'pid': int(json_data['pid']),
                               'action': json_data['action'],
                               'bibrefrec': json_data['bibrefrec']}
            on_ticket = json_data['on']
        except (KeyError, TypeError, ValueError):
            # malformed payload: missing keys, not a dict, or non-int pid
            return self._fail(req, apache.HTTP_NOT_FOUND)

        webapi.session_bareinit(req)
        pinfo = session['personinfo']
        uid = getUid(req)

        operation_to_be_modified = webapi.construct_operation(operation_parts, pinfo, uid, should_have_bibref=False)
        if operation_to_be_modified is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)

        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)

        operation_is_modified = webapi.modify_operation_from_ticket(operation_to_be_modified, ticket)
        if not operation_is_modified:
            # Operation couldn't be modified because it doesn't exist in the
            # ticket. Wrong parameters were given hence we should fail!
            return self._fail(req, apache.HTTP_NOT_FOUND)

        session.dirty = True

    def remove_operation(self, req, form):
        '''
        Remove an operation from the selected ticket.

        @param req: apache request object
        @type req: apache request object
        @param form: parameters sent via Ajax request
        @type form: dict
        '''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"
        # Fail if no json data exists in the Ajax request
        if not form.has_key('jsondata'):
            return self._fail(req, apache.HTTP_NOT_FOUND)

        json_data = json.loads(str(form['jsondata']))
        json_data = json_unicode_to_utf8(json_data)
        try:
            operation_parts = {'pid': int(json_data['pid']),
                               'action': json_data['action'],
                               'bibrefrec': json_data['bibrefrec']}
            on_ticket = json_data['on']
        except (KeyError, TypeError, ValueError):
            # malformed payload: missing keys, not a dict, or non-int pid
            return self._fail(req, apache.HTTP_NOT_FOUND)

        webapi.session_bareinit(req)
        pinfo = session['personinfo']
        uid = getUid(req)

        operation_to_be_removed = webapi.construct_operation(operation_parts, pinfo, uid)
        if operation_to_be_removed is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)

        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)

        operation_is_removed = webapi.remove_operation_from_ticket(operation_to_be_removed, ticket)
        if not operation_is_removed:
            # Operation couldn't be removed because it doesn't exist in the
            # ticket. Wrong parameters were given hence we should fail!
            return self._fail(req, apache.HTTP_NOT_FOUND)

        session.dirty = True

    def commit(self, req, form):
        '''
        Commit all operations of the selected ticket.

        @param req: apache request object
        @type req: apache request object
        @param form: parameters sent via Ajax request
        @type form: dict
        '''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"
        # Fail if no json data exists in the Ajax request
        if not form.has_key('jsondata'):
            return self._fail(req, apache.HTTP_NOT_FOUND)

        json_data = json.loads(str(form['jsondata']))
        json_data = json_unicode_to_utf8(json_data)
        try:
            additional_info = {'first_name': json_data.get('first_name', "Default"),
                               'last_name': json_data.get('last_name', "Default"),
                               'email': json_data.get('email', "Default"),
                               'comments': json_data['comments']}
            on_ticket = json_data['on']
        except (KeyError, TypeError, AttributeError):
            # malformed payload: missing keys or not a dict
            return self._fail(req, apache.HTTP_NOT_FOUND)

        webapi.session_bareinit(req)
        pinfo = session['personinfo']
        ulevel = pinfo['ulevel']
        uid = getUid(req)

        user_is_guest = isGuestUser(uid)
        # Logged-in users get their personal data from the account,
        # overriding whatever the form supplied.
        if not user_is_guest:
            try:
                additional_info['first_name'] = session['user_info']['external_firstname']
                additional_info['last_name'] = session['user_info']['external_familyname']
                additional_info['email'] = session['user_info']['email']
            except KeyError:
                additional_info['first_name'] = additional_info['last_name'] = additional_info['email'] = str(uid)

        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)

        # When a guest is claiming we should not commit if he
        # doesn't provide us his full personal information
        strict_check = user_is_guest
        userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=strict_check)
        if userinfo is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)

        webapi.commit_operations_from_ticket(ticket, userinfo, uid, ulevel)

        session.dirty = True

    def abort(self, req, form):
        '''
        Abort the selected ticket.

        @param req: apache request object
        @type req: apache request object
        @param form: parameters sent via Ajax request
        @type form: dict
        '''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"
        # Fail if no json data exists in the Ajax request
        if not form.has_key('jsondata'):
            return self._fail(req, apache.HTTP_NOT_FOUND)

        json_data = json.loads(str(form['jsondata']))
        json_data = json_unicode_to_utf8(json_data)
        try:
            on_ticket = json_data['on']
        except (KeyError, TypeError):
            # malformed payload: missing 'on' key or not a dict
            return self._fail(req, apache.HTTP_NOT_FOUND)

        webapi.session_bareinit(req)
        pinfo = session['personinfo']

        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)

        # When a user is claiming we should completely delete his ticket if he
        # aborts the claiming procedure
        delete_ticket = (on_ticket == 'user')
        webapi.abort_ticket(ticket, delete_ticket=delete_ticket)

        session.dirty = True

    def _get_according_ticket(self, on_ticket, pinfo):
        # Resolve the ticket target name to the session-stored ticket;
        # None for unknown targets.
        ticket = None
        if on_ticket == 'user':
            ticket = pinfo['ticket']
        elif on_ticket == 'autoclaim':
            ticket = pinfo['autoclaim']['ticket']

        return ticket

    def _fail(self, req, code):
        # Set the HTTP status code and return an empty body.
        req.status = code
        return
class WebAuthorSearch(WebInterfaceDirectory):
    """
    Provides an interface to profile search using AJAX queries.
    """
    _exports = ['list',
                'details']

    # This class requires JSON libraries
    assert CFG_JSON_AVAILABLE, "[WebAuthorSearch] JSON must be enabled."

    class QueryPerson(WebInterfaceDirectory):
        # Dynamic sub-directory: the URL component after /list/ is the
        # query string itself (captured in _lookup below).
        _exports = ['']

        # Minimum number of characters before a query is accepted.
        MIN_QUERY_LENGTH = 2
        # Whitelist of characters allowed in a query (anchored at the
        # start by .match(); $ anchors the end).
        QUERY_REGEX = re.compile(r"[\w\s\.\-,@]+$", re.UNICODE)

        def __init__(self, query=None):
            # query: the URL-supplied search string, or None.
            self.query = query

        def _lookup(self, component, path):
            # Any unknown component is interpreted as the query string.
            if component not in self._exports:
                return WebAuthorSearch.QueryPerson(component), path

        def __call__(self, req, form):
            # Validate the query, then return matching person ids as JSON.
            if self.query is None or len(self.query) < self.MIN_QUERY_LENGTH:
                req.status = apache.HTTP_BAD_REQUEST
                return "Query too short"
            if not self.QUERY_REGEX.match(self.query):
                req.status = apache.HTTP_BAD_REQUEST
                return "Invalid query."

            pid_results = [{"pid": pid[0]} for pid in webapi.search_person_ids_by_name(self.query)]
            req.content_type = 'application/json'
            return json.dumps(pid_results)

        # Request for index handled by __call__
        index = __call__

    def _JSON_received(self, form):
        # True when the request carries a 'jsondata' field.
        try:
            return "jsondata" in form
        except TypeError:
            # form does not support membership tests (e.g. None)
            return False

    def _extract_JSON(self, form):
        # Decode the 'jsondata' payload; None when it is not valid JSON.
        try:
            json_data = json.loads(str(form['jsondata']))
            json_data = json_unicode_to_utf8(json_data)
            return json_data
        except ValueError:
            return None

    def _get_pid_details(self, pid):
        # Assemble the per-person detail record served by details().
        details = webapi.get_person_info_by_pid(pid)
        details.update({
            "names": [{"name": x, "paperCount": y} for x, y in webapi.get_person_names_from_id(pid)],
            "externalIds": [{x: y} for x, y in webapi.get_external_ids_from_person_id(pid).items()]
        })
        # Expose the canonical name under the shorter 'cname' key.
        details['cname'] = details.pop("canonical_name", None)
        return details

    def details(self, req, form):
        """AJAX endpoint: expects jsondata == {'pids': [...]};
        returns the detail records of those persons as JSON."""
        if self._JSON_received(form):
            try:
                json_data = self._extract_JSON(form)
                pids = json_data['pids']

                req.content_type = 'application/json'
                details = [self._get_pid_details(pid) for pid in pids]
                return json.dumps(details)
            except (TypeError, KeyError):
                # json_data was None or lacked 'pids'
                req.status = apache.HTTP_BAD_REQUEST
                return "Invalid query."
        else:
            req.status = apache.HTTP_BAD_REQUEST
            return "Incorrect query format."

    # The 'list' URL component serves the dynamic person-query directory.
    # (Deliberately shadows the builtin name: _exports requires 'list'.)
    list = QueryPerson()
class WebInterfaceAuthor(WebInterfaceDirectory):

    '''
    Handles /author/* pages.

    Supplies the methods:
        /author/choose_profile
        /author/claim/
        /author/help
        /author/manage_profile
        /author/merge_profiles
        /author/profile/
        /author/search
        /author/ticket/
    '''
    _exports = ['',
                'choose_profile',
                'claim',
                'help',
                'manage_profile',
                'merge_profiles',
                'profile',
                'search',
                'search_ajax',
                'ticket']

    from invenio.legacy.webauthorprofile.webinterface import WebAuthorPages

    # Sub-directories and aliases wired onto the URL space.
    claim = WebInterfaceBibAuthorIDClaimPages()
    profile = WebAuthorPages()
    choose_profile = claim.choose_profile
    help = claim.help
    manage_profile = WebInterfaceBibAuthorIDManageProfilePages()
    merge_profiles = claim.merge_profiles
    search = claim.search
    search_ajax = WebAuthorSearch()
    ticket = WebInterfaceAuthorTicketHandling()

    def _lookup(self, component, path):
        # Unknown URL components are treated as author identifiers.
        if component not in self._exports:
            return WebInterfaceAuthor(component), path

    def __init__(self, component=None):
        # component: the raw /author/<component> URL fragment, or None.
        self.path = component

    def __call__(self, req, form):
        '''
        Redirect /author/<identifier> to the canonical profile URL
        (or to the search page when the identifier cannot be resolved).

        @param req: Apache request object
        @param form: GET/POST request params
        '''
        # Empty path: nothing to resolve, go to the search page.
        # (Was ``self.path is None or len(self.path) < 1``.)
        if not self.path:
            redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)

        # Check if canonical id: e.g. "J.R.Ellis.1"
        pid = get_person_id_from_canonical_id(self.path)
        if pid >= 0:
            url = "%s/author/profile/%s" % (CFG_BASE_URL, get_person_redirect_link(pid))
            redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
            return
        else:
            try:
                pid = int(self.path)
            except ValueError:
                # Not numeric either: treat it as a free-text search.
                redirect_to_url(req, "%s/author/search?q=%s" % (CFG_BASE_URL, self.path))
                return
            else:
                if author_has_papers(pid):
                    # Prefer the canonical id in the redirect when valid.
                    cid = get_person_redirect_link(pid)
                    if is_valid_canonical_id(cid):
                        redirect_id = cid
                    else:
                        redirect_id = pid
                    url = "%s/author/profile/%s" % (CFG_BASE_URL, redirect_id)
                    redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
                    return

        # Numeric id without papers: fall back to the search page.
        redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)
        return

    index = __call__
class WebInterfacePerson(WebInterfaceDirectory):

    '''
    Handles /person/* pages.

    Supplies the methods:
        /person/welcome
        /person/update
        /person/you
    '''
    _exports = ['welcome', 'update', 'you']

    def welcome(self, req, form):
        # Legacy entry point: forward to the author profile chooser.
        redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)

    def you(self, req, form):
        # Alias of 'welcome'.
        redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)

    def update(self, req, form):
        """
        Generate hepnames update form

        @param req: Apache request object
        @type req: Apache request object
        @param form: GET/POST request params; understood keys are
            'ln', 'email' and 'IRN'
        @type form: dict
        """
        argd = wash_urlargd(form,
                            {'ln': (str, CFG_SITE_LANG),
                             'email': (str, ''),
                             'IRN': (str, ''),
                             })
        # Retrieve info for HEP name based on email or IRN
        recids = []
        if argd['email']:
            recids = perform_request_search(p="371__m:%s" % argd['email'], cc="HepNames")
        elif argd['IRN']:
            recids = perform_request_search(p="001:%s" % argd['IRN'], cc="HepNames")
        else:
            # Neither parameter given: bail out to the collection page.
            # NOTE(review): execution would continue below if
            # redirect_to_url returned instead of raising -- presumably
            # it raises; confirm.
            redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
        if not recids:
            redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
        else:
            hepname_bibrec = get_bibrecord(recids[0])
            # Extract all info from recid that should be included in the form
            full_name = record_get_field_value(hepname_bibrec, tag="100", ind1="", ind2="", code="a")
            display_name = record_get_field_value(hepname_bibrec, tag="880", ind1="", ind2="", code="a")
            email = record_get_field_value(hepname_bibrec, tag="371", ind1="", ind2="", code="m")
            status = record_get_field_value(hepname_bibrec, tag="100", ind1="", ind2="", code="g")
            keynumber = record_get_field_value(hepname_bibrec, tag="970", ind1="", ind2="", code="a")
            try:
                # keep only the part after the first '-' in the 970__a key
                # (e.g. 'PREFIX-12345' -> '12345'); left unchanged when
                # there is no dash
                keynumber = keynumber.split('-')[1]
            except IndexError:
                pass
            research_field_list = record_get_field_values(hepname_bibrec, tag="650", ind1="1", ind2="7", code="a")
            institution_list = []
            for instance in record_get_field_instances(hepname_bibrec, tag="371", ind1="", ind2=""):
                # 371 instances carrying an $m subfield are the email
                # entries (see `email` above), not institutions -- skip.
                if not instance or field_get_subfield_values(instance, "m"):
                    continue
                # [name, rank, start, end, obsolete] -- one slot per
                # subfield of interest, empty string when absent.
                institution_info = ["", "", "", "", ""]
                if field_get_subfield_values(instance, "a"):
                    institution_info[0] = field_get_subfield_values(instance, "a")[0]
                if field_get_subfield_values(instance, "r"):
                    institution_info[1] = field_get_subfield_values(instance, "r")[0]
                if field_get_subfield_values(instance, "s"):
                    institution_info[2] = field_get_subfield_values(instance, "s")[0]
                if field_get_subfield_values(instance, "t"):
                    institution_info[3] = field_get_subfield_values(instance, "t")[0]
                if field_get_subfield_values(instance, "z"):
                    institution_info[4] = field_get_subfield_values(instance, "z")[0]
                institution_list.append(institution_info)
            phd_advisor_list = record_get_field_values(hepname_bibrec, tag="701", ind1="", ind2="", code="a")
            experiment_list = record_get_field_values(hepname_bibrec, tag="693", ind1="", ind2="", code="e")
            web_page = record_get_field_value(hepname_bibrec, tag="856", ind1="1", ind2="", code="u")
            # Create form and pass as parameters all the content from the record
            body = TEMPLATE.tmpl_update_hep_name(full_name, display_name, email,
                                                 status, research_field_list,
                                                 institution_list, phd_advisor_list,
                                                 experiment_list, web_page)
            title = "HEPNames"
            return page(title=title,
                        metaheaderadd=TEMPLATE.tmpl_update_hep_name_headers(),
                        body=body,
                        req=req,
                        )
# pylint: enable=C0301
# pylint: enable=W0613
| MSusik/invenio | invenio/legacy/bibauthorid/webinterface.py | Python | gpl-2.0 | 146,848 | [
"VisIt"
] | b82ce81c5346a3bd60176b6a0582f2391adfef7ecbc2f1d59f4ea812a8e2cd34 |
"""Relax a right-angle water molecule with FHI-aims and visualize it."""
from ase import Atoms
from ase.calculators.aims import Aims, AimsCube
from ase.optimize import QuasiNewton
from ase.visualize import view

# H-O-H with both bonds of length 1 along perpendicular axes.
molecule = Atoms('HOH', [(1, 0, 0), (0, 0, 0), (0, 1, 0)])

# Cube-file output: total/difference densities plus two eigenstates.
density_cube = AimsCube(points=(29, 29, 29),
                        plots=('total_density', 'delta_density',
                               'eigenstate 5', 'eigenstate 6'))

# PBE calculation with tight SCF convergence criteria.
aims_calc = Aims(xc='pbe',
                 sc_accuracy_etot=1e-6,
                 sc_accuracy_eev=1e-3,
                 sc_accuracy_rho=1e-6,
                 sc_accuracy_forces=1e-4,
                 species_dir='/home/hanke/codes/fhi-aims/fhi-aims.workshop/species_defaults/light/',
                 run_command='aims.workshop.serial.x',
                 cubes=density_cube)
molecule.set_calculator(aims_calc)

# Quasi-Newton relaxation down to 0.01 eV/A max force.
relaxation = QuasiNewton(molecule, trajectory='square_water.traj')
relaxation.run(fmax=0.01)
view(molecule)
| slabanja/ase | doc/ase/calculators/H2O_aims.py | Python | gpl-2.0 | 803 | [
"ASE",
"FHI-aims"
] | e9e39171129c8c0b35262872630c5bb46c0e44611672a0e803000a8e66ba6fd8 |
import os
import sys
from tarfile import is_tarfile
from zipfile import is_zipfile
from ase.atoms import Atoms
from ase.units import Bohr, Hartree
from ase.io.trajectory import PickleTrajectory
from ase.io.bundletrajectory import BundleTrajectory
from ase.calculators.singlepoint import SinglePointDFTCalculator
from ase.calculators.singlepoint import SinglePointKPoint
__all__ = ['read', 'write', 'PickleTrajectory', 'BundleTrajectory']
def read(filename, index=-1, format=None):
"""Read Atoms object(s) from file.
filename: str
Name of the file to read from.
index: int or slice
If the file contains several configurations, the last configuration
will be returned by default. Use index=n to get configuration
number n (counting from zero).
format: str
Used to specify the file-format. If not given, the
file-format will be guessed by the *filetype* function.
Known formats:
========================= =============
format short name
========================= =============
GPAW restart-file gpw
Dacapo netCDF output file dacapo
Old ASE netCDF trajectory nc
Virtual Nano Lab file vnl
ASE pickle trajectory traj
ASE bundle trajectory bundle
GPAW text output gpaw-text
CUBE file cube
XCrySDen Structure File xsf
Dacapo text output dacapo-text
XYZ-file xyz
VASP POSCAR/CONTCAR file vasp
VASP OUTCAR file vasp_out
SIESTA STRUCT file struct_out
ABINIT input file abinit
V_Sim ascii file v_sim
Protein Data Bank pdb
CIF-file cif
FHI-aims geometry file aims
FHI-aims output file aims_out
VTK XML Image Data vti
VTK XML Structured Grid vts
VTK XML Unstructured Grid vtu
TURBOMOLE coord file tmol
TURBOMOLE gradient file tmol-gradient
exciting input exi
AtomEye configuration cfg
WIEN2k structure file struct
DftbPlus input file dftb
CASTEP geom file cell
CASTEP output file castep
CASTEP trajectory file geom
ETSF format etsf.nc
DFTBPlus GEN format gen
CMR db/cmr-file db
CMR db/cmr-file cmr
LAMMPS dump file lammps
EON reactant.con file eon
Gromacs coordinates gro
Gaussian com (input) file gaussian
Gaussian output file gaussian_out
========================= =============
"""
if isinstance(filename, str):
p = filename.rfind('@')
if p != -1:
try:
index = string2index(filename[p + 1:])
except ValueError:
pass
else:
filename = filename[:p]
if isinstance(index, str):
index = string2index(index)
if format is None:
format = filetype(filename)
if format.startswith('gpw'):
import gpaw
r = gpaw.io.open(filename, 'r')
positions = r.get('CartesianPositions') * Bohr
numbers = r.get('AtomicNumbers')
cell = r.get('UnitCell') * Bohr
pbc = r.get('BoundaryConditions')
tags = r.get('Tags')
magmoms = r.get('MagneticMoments')
energy = r.get('PotentialEnergy') * Hartree
if r.has_array('CartesianForces'):
forces = r.get('CartesianForces') * Hartree / Bohr
else:
forces = None
atoms = Atoms(positions=positions,
numbers=numbers,
cell=cell,
pbc=pbc)
if tags.any():
atoms.set_tags(tags)
if magmoms.any():
atoms.set_initial_magnetic_moments(magmoms)
else:
magmoms = None
atoms.calc = SinglePointDFTCalculator(energy, forces, None, magmoms,
atoms)
kpts = []
if r.has_array('IBZKPoints'):
for w, kpt, eps_n, f_n in zip(r.get('IBZKPointWeights'),
r.get('IBZKPoints'),
r.get('Eigenvalues'),
r.get('OccupationNumbers')):
print eps_n.shape, f_n.shape
kpts.append(SinglePointKPoint(w, kpt[0], kpt[1],
eps_n[0], f_n[0] )) # XXX
atoms.calc.kpts = kpts
return atoms
if format == 'castep':
from ase.io.castep import read_castep
return read_castep(filename, index)
if format == 'castep_cell':
import ase.io.castep
return ase.io.castep.read_cell(filename, index)
if format == 'castep_geom':
import ase.io.castep
return ase.io.castep.read_geom(filename, index)
if format == 'exi':
from ase.io.exciting import read_exciting
return read_exciting(filename, index)
if format == 'xyz':
from ase.io.xyz import read_xyz
return read_xyz(filename, index)
if format == 'traj':
from ase.io.trajectory import read_trajectory
return read_trajectory(filename, index)
if format == 'bundle':
from ase.io.bundletrajectory import read_bundletrajectory
return read_bundletrajectory(filename, index)
if format == 'cube':
from ase.io.cube import read_cube
return read_cube(filename, index)
if format == 'nc':
from ase.io.netcdf import read_netcdf
return read_netcdf(filename, index)
if format == 'gpaw-text':
from ase.io.gpawtext import read_gpaw_text
return read_gpaw_text(filename, index)
if format == 'dacapo-text':
from ase.io.dacapo import read_dacapo_text
return read_dacapo_text(filename)
if format == 'dacapo':
from ase.io.dacapo import read_dacapo
return read_dacapo(filename)
if format == 'xsf':
from ase.io.xsf import read_xsf
return read_xsf(filename, index)
if format == 'vasp':
from ase.io.vasp import read_vasp
return read_vasp(filename)
if format == 'vasp_out':
from ase.io.vasp import read_vasp_out
return read_vasp_out(filename, index)
if format == 'abinit':
from ase.io.abinit import read_abinit
return read_abinit(filename)
if format == 'v_sim':
from ase.io.v_sim import read_v_sim
return read_v_sim(filename)
if format == 'mol':
from ase.io.mol import read_mol
return read_mol(filename)
if format == 'pdb':
from ase.io.pdb import read_pdb
return read_pdb(filename, index)
if format == 'cif':
from ase.io.cif import read_cif
return read_cif(filename, index)
if format == 'struct':
from ase.io.wien2k import read_struct
return read_struct(filename)
if format == 'struct_out':
from ase.io.siesta import read_struct
return read_struct(filename)
if format == 'vti':
from ase.io.vtkxml import read_vti
return read_vti(filename)
if format == 'vts':
from ase.io.vtkxml import read_vts
return read_vts(filename)
if format == 'vtu':
from ase.io.vtkxml import read_vtu
return read_vtu(filename)
if format == 'aims':
from ase.io.aims import read_aims
return read_aims(filename)
if format == 'aims_out':
from ase.io.aims import read_aims_output
return read_aims_output(filename, index)
if format == 'iwm':
from ase.io.iwm import read_iwm
return read_iwm(filename)
if format == 'Cmdft':
from ase.io.cmdft import read_I_info
return read_I_info(filename)
if format == 'tmol':
from ase.io.turbomole import read_turbomole
return read_turbomole(filename)
if format == 'tmol-gradient':
from ase.io.turbomole import read_turbomole_gradient
return read_turbomole_gradient(filename)
if format == 'cfg':
from ase.io.cfg import read_cfg
return read_cfg(filename)
if format == 'dftb':
from ase.io.dftb import read_dftb
return read_dftb(filename)
if format == 'sdf':
from ase.io.sdf import read_sdf
return read_sdf(filename)
if format == 'etsf':
from ase.io.etsf import ETSFReader
return ETSFReader(filename).read_atoms()
if format == 'gen':
from ase.io.gen import read_gen
return read_gen(filename)
if format == 'db':
from ase.io.cmr_io import read_db
return read_db(filename, index)
if format == 'lammps':
from ase.io.lammps import read_lammps_dump
return read_lammps_dump(filename, index)
if format == 'eon':
from ase.io.eon import read_reactant_con
return read_reactant_con(filename)
if format == 'gromacs':
from ase.io.gromacs import read_gromacs
return read_gromacs(filename)
if format == 'gaussian':
from ase.io.gaussian import read_gaussian
return read_gaussian(filename)
if format == 'gaussian_out':
from ase.io.gaussian import read_gaussian_out
return read_gaussian_out(filename, index)
raise RuntimeError('File format descriptor '+format+' not recognized!')
def write(filename, images, format=None, **kwargs):
"""Write Atoms object(s) to file.
filename: str
Name of the file to write to.
images: Atoms object or list of Atoms objects
A single Atoms object or a list of Atoms objects.
format: str
Used to specify the file-format. If not given, the
file-format will be taken from suffix of the filename.
The accepted output formats:
========================= ===========
format short name
========================= ===========
ASE pickle trajectory traj
ASE bundle trajectory bundle
CUBE file cube
XYZ-file xyz
VASP POSCAR/CONTCAR file vasp
ABINIT input file abinit
Protein Data Bank pdb
CIF-file cif
XCrySDen Structure File xsf
FHI-aims geometry file aims
gOpenMol .plt file plt
Python script py
Encapsulated Postscript eps
Portable Network Graphics png
Persistance of Vision pov
VTK XML Image Data vti
VTK XML Structured Grid vts
VTK XML Unstructured Grid vtu
TURBOMOLE coord file tmol
exciting exi
AtomEye configuration cfg
WIEN2k structure file struct
CASTEP cell file cell
DftbPlus input file dftb
ETSF etsf.nc
DFTBPlus GEN format gen
CMR db/cmr-file db
CMR db/cmr-file cmr
EON reactant.con file eon
Gromacs coordinates gro
GROMOS96 (only positions) g96
========================= ===========
The use of additional keywords is format specific.
The ``cube`` and ``plt`` formats accept (plt requires it) a ``data``
keyword, which can be used to write a 3D array to the file along
with the nuclei coordinates.
The ``vti``, ``vts`` and ``vtu`` formats are all specifically directed
for use with MayaVi, and the latter is designated for visualization of
the atoms whereas the two others are intended for volume data. Further,
it should be noted that the ``vti`` format is intended for orthogonal
unit cells as only the grid-spacing is stored, whereas the ``vts`` format
additionally stores the coordinates of each grid point, thus making it
useful for volume date in more general unit cells.
The ``eps``, ``png``, and ``pov`` formats are all graphics formats,
and accept the additional keywords:
rotation: str (default '')
The rotation angles, e.g. '45x,70y,90z'.
show_unit_cell: int (default 0)
Can be 0, 1, 2 to either not show, show, or show all of the unit cell.
radii: array or float (default 1.0)
An array of same length as the list of atoms indicating the sphere radii.
A single float specifies a uniform scaling of the default covalent radii.
bbox: 4 floats (default None)
Set the bounding box to (xll, yll, xur, yur) (lower left, upper right).
colors: array (default None)
An array of same length as the list of atoms, indicating the rgb color
code for each atom. Default is the jmol_colors of ase/data/colors.
scale: int (default 20)
Number of pixels per Angstrom.
For the ``pov`` graphics format, ``scale`` should not be specified.
The elements of the color array can additionally be strings, or 4
and 5 vectors for named colors, rgb + filter, and rgb + filter + transmit
specification. This format accepts the additional keywords:
``run_povray``, ``display``, ``pause``, ``transparent``,
``canvas_width``, ``canvas_height``, ``camera_dist``,
``image_plane``, ``camera_type``, ``point_lights``,
``area_light``, ``background``, ``textures``, ``celllinewidth``,
``bondlinewidth``, ``bondatoms``
"""
if format is None:
if filename == '-':
format = 'xyz'
filename = sys.stdout
elif 'POSCAR' in filename or 'CONTCAR' in filename:
format = 'vasp'
elif 'OUTCAR' in filename:
format = 'vasp_out'
elif filename.endswith('etsf.nc'):
format = 'etsf'
elif filename.lower().endswith('.con'):
format = 'eon'
elif os.path.basename(filename) == 'coord':
format = 'tmol'
else:
suffix = filename.split('.')[-1]
format = {'cell':'castep_cell',
}.get(suffix, suffix) # XXX this does not make sense
# Maybe like this:
## format = {'traj': 'trajectory',
## 'nc': 'netcdf',
## 'exi': 'exciting',
## 'in': 'aims',
## 'tmol': 'turbomole',
## }.get(suffix, suffix)
if format == 'castep_cell':
from ase.io.castep import write_cell
write_cell(filename, images, **kwargs)
return
if format == 'exi':
from ase.io.exciting import write_exciting
write_exciting(filename, images)
return
if format == 'cif':
from ase.io.cif import write_cif
write_cif(filename, images)
if format == 'xyz':
from ase.io.xyz import write_xyz
write_xyz(filename, images)
return
if format == 'gen':
from ase.io.gen import write_gen
write_gen(filename, images)
return
elif format == 'in':
format = 'aims'
elif format == 'tmol':
from ase.io.turbomole import write_turbomole
write_turbomole(filename, images)
return
elif format == 'dftb':
from ase.io.dftb import write_dftb
write_dftb(filename, images)
return
elif format == 'struct':
from ase.io.wien2k import write_struct
write_struct(filename, images, **kwargs)
return
elif format == 'findsym':
from ase.io.findsym import write_findsym
write_findsym(filename, images)
return
elif format == 'etsf':
from ase.io.etsf import ETSFWriter
writer = ETSFWriter(filename)
if not isinstance(images, (list, tuple)):
images = [images]
writer.write_atoms(images[0])
writer.close()
return
elif format == 'db' or format == 'cmr':
from ase.io.cmr_io import write_db
return write_db(filename, images, **kwargs)
elif format == 'eon':
from ase.io.eon import write_reactant_con
write_reactant_con(filename, images)
return
elif format == 'gro':
from ase.io.gromacs import write_gromacs
write_gromacs(filename, images)
return
elif format == 'g96':
from ase.io.gromos import write_gromos
write_gromos(filename, images)
return
format = {'traj': 'trajectory',
'nc': 'netcdf',
'bundle': 'bundletrajectory'
}.get(format, format)
name = 'write_' + format
if format in ['vti', 'vts', 'vtu']:
format = 'vtkxml'
if format is None:
format = filetype(filename)
try:
write = getattr(__import__('ase.io.%s' % format, {}, {}, [name]), name)
except ImportError:
raise TypeError('Unknown format: "%s".' % format)
write(filename, images, **kwargs)
def string2index(string):
if ':' not in string:
return int(string)
i = []
for s in string.split(':'):
if s == '':
i.append(None)
else:
i.append(int(s))
i += (3 - len(i)) * [None]
return slice(*i)
def filetype(filename):
"""Try to guess the type of the file."""
if os.path.isdir(filename):
# Potentially a BundleTrajectory
if BundleTrajectory.is_bundle(filename):
return 'bundle'
elif os.path.normpath(filename) == 'states':
return 'eon'
else:
raise IOError('Directory: ' + filename)
fileobj = open(filename, 'rU')
s3 = fileobj.read(3)
if len(s3) == 0:
raise IOError('Empty file: ' + filename)
if filename.lower().endswith('.db') or filename.lower().endswith('.cmr'):
return 'db'
if is_tarfile(filename):
return 'gpw'
if s3 == 'CDF':
from ase.io.pupynere import NetCDFFile
nc = NetCDFFile(filename)
if 'number_of_dynamic_atoms' in nc.dimensions:
return 'dacapo'
history = nc.history
if history == 'GPAW restart file':
return 'gpw-nc'
if history == 'ASE trajectory':
return 'nc'
if history == 'Dacapo':
return 'dacapo'
if hasattr(nc, 'file_format') and nc.file_format.startswith('ETSF'):
return 'etsf'
raise IOError('Unknown netCDF file!')
if is_zipfile(filename):
return 'vnl'
fileobj.seek(0)
lines = fileobj.readlines(1000)
if lines[0].startswith('PickleTrajectory'):
return 'traj'
if lines[1].startswith('OUTER LOOP:') or filename.lower().endswith('.cube'):
return 'cube'
if ' ___ ___ ___ _ _ _ \n' in lines:
return 'gpaw-text'
if (' &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n'
in lines[:90]):
return 'dacapo-text'
for line in lines:
if line[0] != '#':
word = line.strip()
if word in ['ANIMSTEPS', 'CRYSTAL', 'SLAB', 'POLYMER', 'MOLECULE']:
return 'xsf'
filename_v = os.path.basename(filename)
if 'POSCAR' in filename_v or 'CONTCAR' in filename_v:
return 'vasp'
if 'OUTCAR' in filename_v:
return 'vasp_out'
if filename.lower().endswith('.exi'):
return 'exi'
if filename.lower().endswith('.mol'):
return 'mol'
if filename.lower().endswith('.pdb'):
return 'pdb'
if filename.lower().endswith('.cif'):
return 'cif'
if filename.lower().endswith('.struct'):
return 'struct'
if filename.lower().endswith('.struct_out'):
return 'struct_out'
fileobj.seek(0)
while True:
line = fileobj.readline()
if not line:
break
if 'Invoking FHI-aims ...' in line:
return 'aims_out'
if 'atom' in line:
data = line.split()
try:
a = Atoms(symbols=[data[4]],positions = [[float(data[1]),float(data[2]),float(data[3])]])
return 'aims'
except:
pass
if filename.lower().endswith('.in'):
return 'aims'
if filename.lower().endswith('.cfg'):
return 'cfg'
if os.path.split(filename)[1] == 'atoms.dat':
return 'iwm'
if filename.endswith('I_info'):
return 'Cmdft'
if lines[0].startswith('$coord') or os.path.basename(filename) == 'coord':
return 'tmol'
if lines[0].startswith('$grad') or os.path.basename(filename) == 'gradient':
return 'tmol-gradient'
if lines[0].startswith('Geometry'):
return 'dftb'
if filename.lower().endswith('.geom'):
return 'castep_geom'
if filename.lower().endswith('.castep'):
return 'castep'
if filename.lower().endswith('.cell'):
return 'castep_cell'
if s3 == '<?x':
from ase.io.vtkxml import probe_vtkxml
xmltype = probe_vtkxml(filename)
if xmltype == 'ImageData':
return 'vti'
elif xmltype == 'StructuredGrid':
return 'vts'
elif xmltype == 'UnstructuredGrid':
return 'vtu'
elif xmltype is not None:
raise IOError('Unknown VTK XML file!')
if filename.lower().endswith('.sdf'):
return 'sdf'
if filename.lower().endswith('.gen'):
return 'gen'
if filename.lower().endswith('.con'):
return 'eon'
if 'ITEM: TIMESTEP\n' in lines:
return 'lammps'
if filename.lower().endswith('.gro'):
return 'gromacs'
if filename.lower().endswith('.log'):
return 'gaussian_out'
if filename.lower().endswith('.com'):
return 'gaussian'
if filename.lower().endswith('.g96'):
return 'gromos'
return 'xyz'
| conwayje/ase-python | ase/io/__init__.py | Python | gpl-2.0 | 21,693 | [
"ABINIT",
"ASE",
"CASTEP",
"CRYSTAL",
"FHI-aims",
"GPAW",
"GROMOS",
"Gaussian",
"Gromacs",
"LAMMPS",
"Mayavi",
"NetCDF",
"SIESTA",
"TURBOMOLE",
"VASP",
"VTK",
"WIEN2k",
"exciting"
] | b4bc9235b05104c4b92429602faf751bd7b486839b7e327458ab8b6c58bc1698 |
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from xgb import XGBoostClassifier
BasicSVM = Pipeline([("SVM (linear)", LinearSVC())])
NB = Pipeline([("Gaussian NB Bayes", GaussianNB())])
SGD = Pipeline([("Stochastic Gradient Descent", SGDClassifier())])
DTC = Pipeline([("Decision Tree", DecisionTreeClassifier())])
AdaBoost = Pipeline([("Ada Boost", AdaBoostClassifier())])
GradientBoosting = Pipeline([("Gradient Boosting", GradientBoostingClassifier())])
XGB = Pipeline([("XGBoost", XGBoostClassifier(num_class=2, silent=1))])
RandomForest = Pipeline([("Random Forest", RandomForestClassifier())])
| lbn/kaggle | titanic/classifier.py | Python | mit | 866 | [
"Gaussian"
] | 3ca2bdecd0c390db6ad58f1463aa263997c5e33d977011d53e814b2427c2405a |
"""
Helps to implement authentication and authorization using Auth0.
Offers functions for generating the view functions needed to implement Auth0,
a login screen, callback maker, and a function decorator for protecting
endpoints.
"""
import flask
import requests
import functools
import json
import base64
import jwt
import ga4gh.server.exceptions as exceptions
def auth_decorator(app=None):
"""
This decorator wraps a view function so that it is protected when Auth0
is enabled. This means that any request will be expected to have a signed
token in the authorization header if the `AUTH0_ENABLED` configuration
setting is True.
The authorization header will have the form:
"authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9....."
If a request is not properly signed, an attempt is made to provide the
client with useful error messages. This means that if a request is not
authorized the underlying view function will not be executed.
When `AUTH0_ENABLED` is false, this decorator will simply execute the
decorated view without observing the authorization header.
:param app:
:return: Flask view decorator
"""
def requires_auth(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
# This decorator will only apply with AUTH0_ENABLED set to True.
if app.config.get('AUTH0_ENABLED', False):
client_id = app.config.get("AUTH0_CLIENT_ID")
client_secret = app.config.get("AUTH0_CLIENT_SECRET")
auth_header = flask.request.headers.get('Authorization', None)
# Each of these functions will throw a 401 is there is a
# problem decoding the token with some helpful error message.
if auth_header:
token, profile = _decode_header(
auth_header, client_id, client_secret)
else:
raise exceptions.NotAuthorizedException()
# We store the token in the session so that later
# stages can use it to connect identity and authorization.
flask._request_ctx_stack.top.current_user = profile
flask.session['auth0_key'] = token
# Now we need to make sure that on top of having a good token
# They are authorized, and if not provide an error message
is_authorized(app.cache, profile['email'])
is_active(app.cache, token)
return f(*args, **kwargs)
return decorated
return requires_auth
def decode_header(auth_header, client_id, client_secret):
"""
A function that threads the header through decoding and returns a tuple
of the token and payload if successful. This does not fully authenticate
a request.
:param auth_header:
:param client_id:
:param client_secret:
:return: (token, profile)
"""
return _decode_header(
_well_formed(
_has_token(_has_bearer(_has_header(auth_header)))),
client_id, client_secret)
def logout(cache):
"""
Logs out the current session by removing it from the cache. This is
expected to only occur when a session has
"""
cache.set(flask.session['auth0_key'], None)
flask.session.clear()
return True
def callback_maker(
cache=None, domain='', client_id='',
client_secret='', redirect_uri=''):
"""
This function will generate a view function that can be used to handle
the return from Auth0. The "callback" is a redirected session from auth0
that includes the token we can use to authenticate that session.
If the session is properly authenticated Auth0 will provide a code so our
application can identify the session. Once this has been done we ask
for more information about the identified session from Auth0. We then use
the email of the user logged in to Auth0 to authorize their token to make
further requests by adding it to the application's cache.
It sets a value in the cache that sets the current session as logged in. We
can then refer to this id_token to later authenticate a session.
:param domain:
:param client_id:
:param client_secret:
:param redirect_uri:
:return : View function
"""
def callback_handling():
code = flask.request.args.get('code')
if code is None:
raise exceptions.NotAuthorizedException(
'The callback expects a well '
'formatted code, {} was provided'.format(code))
json_header = {'content-type': 'application/json'}
# Get auth token
token_url = "https://{domain}/oauth/token".format(domain=domain)
token_payload = {
'client_id': client_id,
'client_secret': client_secret,
'redirect_uri': redirect_uri,
'code': code,
'grant_type': 'authorization_code'}
try:
token_info = requests.post(
token_url,
data=json.dumps(token_payload),
headers=json_header).json()
id_token = token_info['id_token']
access_token = token_info['access_token']
except Exception as e:
raise exceptions.NotAuthorizedException(
'The callback from Auth0 did not'
'include the expected tokens: \n'
'{}'.format(e.message))
# Get profile information
try:
user_url = \
"https://{domain}/userinfo?access_token={access_token}".format(
domain=domain, access_token=access_token)
user_info = requests.get(user_url).json()
email = user_info['email']
except Exception as e:
raise exceptions.NotAuthorizedException(
'The user profile from Auth0 did '
'not contain the expected data: \n {}'.format(e.message))
# Log token in
user = cache.get(email)
if user and user['authorized']:
cache.set(id_token, user_info)
return flask.redirect('/login?code={}'.format(id_token))
else:
return flask.redirect('/login')
return callback_handling
def render_login(
app=None, scopes='', redirect_uri='', domain='', client_id=''):
"""
This function will generate a view function that can be used to handle
the return from Auth0. The "callback" is a redirected session from auth0
that includes the token we can use to authenticate that session.
If the session is properly authenticated Auth0 will provide a code so our
application can identify the session. Once this has been done we ask
for more information about the identified session from Auth0. We then use
the email of the user logged in to Auth0 to authorize their token to make
further requests by adding it to the application's cache.
It sets a value in the cache that sets the current session as logged in. We
can then refer to this id_token to later authenticate a session.
:param app:
:param scopes:
:param redirect_uri:
:param domain:
:param client_id:
:return : Rendered login template
"""
return app.jinja_env.from_string(LOGIN_HTML).render(
scopes=scopes,
redirect_uri=redirect_uri,
domain=domain,
client_id=client_id)
def render_key(app, key=""):
"""
Renders a view from the app and a key that lets the current session grab
its token.
:param app:
:param key:
:return: Rendered view
"""
return app.jinja_env.from_string(KEY_HTML).render(
key=key)
def authorize_email(email='davidcs@ucsc.edu', cache=None):
"""
Adds an email address to the list of authorized emails stored in an
ephemeral cache.
:param email:
"""
# TODO safely access cache
cache.set(email, {'authorized': True})
def _has_header(auth_header):
if not auth_header:
raise exceptions.NotAuthorizedException(
'Authorization header is expected.')
return auth_header
def _has_bearer(auth_header):
parts = auth_header.split()
if parts[0].lower() != 'bearer':
raise exceptions.NotAuthorizedException(
'Authorization header must start with "Bearer".')
return auth_header
def _has_token(auth_header):
parts = auth_header.split()
if len(parts) == 1:
raise exceptions.NotAuthorizedException(
'Token not found in header.')
return auth_header
def _well_formed(auth_header):
parts = auth_header.split()
if len(parts) > 2:
raise exceptions.NotAuthorizedException(
'Authorization header must be Bearer + \s + token.')
return auth_header
def _decode_header(auth_header, client_id, client_secret):
"""
Takes the header and tries to return an active token and decoded
payload.
:param auth_header:
:param client_id:
:param client_secret:
:return: (token, profile)
"""
try:
token = auth_header.split()[1]
b64secret = client_secret.replace(
"_", "/").replace("-", "+")
payload = jwt.decode(
token,
base64.b64decode(b64secret),
audience=client_id)
except jwt.ExpiredSignature:
raise exceptions.NotAuthorizedException(
'Token has expired, please log in again.')
# is valid client
except jwt.InvalidAudienceError:
message = 'Incorrect audience, expected: {}'.format(
client_id)
raise exceptions.NotAuthorizedException(message)
# is valid token
except jwt.DecodeError:
raise exceptions.NotAuthorizedException(
'Token signature could not be validated.')
except Exception as e:
raise exceptions.NotAuthorizedException(
'Token signature was malformed. {}'.format(e.message))
return token, payload
def is_authorized(cache, email):
if not cache.get(email):
message = '{} is not authorized to ' \
'access this resource'.format(email)
raise exceptions.NotAuthenticatedException(message)
return email
def is_active(cache, token):
"""
Accepts the cache and ID token and checks to see if the profile is
currently logged in. If so, return the token, otherwise throw a
NotAuthenticatedException.
:param cache:
:param token:
:return:
"""
profile = cache.get(token)
if not profile:
raise exceptions.NotAuthenticatedException(
'The token is good, but you are not logged in. Please '
'try logging in again.')
return profile
# This HTML string is used to render the login page. It is a jinja template.
LOGIN_HTML = """<html>
<head>
<title>Log in</title></head><body><div>
<script src="https://cdn.auth0.com/js/lock/10.0/lock.min.js"></script>
<script type="text/javascript">
var lock = new Auth0Lock('{{ client_id }}', '{{ domain }}', {
auth: {
redirectUrl: '{{ redirect_uri }}',
responseType: 'code',
params: {
scope: '{{ scopes }}' // https://auth0.com/docs/scopes
}
}
});
lock.show();
</script>
</div>"""
KEY_HTML = """<html>
<head>
<title>GA4GH Server API Token</title></head><body><div>
<h1>Your API Token</h1>
<p>Your token is now active, add it as your "Authorization: bearer $TOKEN" header
when making requests to protected endpoints</p>
<textarea cols=120 rows=5 onClick='this.select()' readonly>{{ key }}</textarea>
<h3><a href="/?key={{ key }}">Visit landing page</a></h3>
</div>
""" # noqa
| david4096/ga4gh-server | ga4gh/server/auth/__init__.py | Python | apache-2.0 | 11,719 | [
"VisIt"
] | 5b4827d6acf631af4bbd30eac11e8bac0280c227758369017a0f63cc052c2b94 |
import re
import textwrap
import numpy as np
from pysisyphus.calculators.Calculator import Calculator
class Psi4(Calculator):
conf_key = "psi4"
def __init__(
self,
method,
basis,
to_set=None,
pcm="iefpcm",
solvent=None,
write_fchk=False,
**kwargs,
):
super().__init__(**kwargs)
self.method = method
self.basis = basis
self.to_set = {} if to_set is None else dict(to_set)
self.pcm = pcm
self.solvent = solvent
self.write_fchk = write_fchk
self.inp_fn = "psi4.inp"
self.out_fn = "psi4.out"
self.to_keep = ("inp", "psi4.out", "grad.npy", "hessian.npy")
self.parser_funcs = {
"energy": self.parse_energy,
"grad": self.parse_grad,
"hessian": self.parse_hessian,
}
self.base_cmd = self.get_cmd()
self.inp = textwrap.dedent(
"""
molecule mol{{
{xyz}
{charge} {mult}
symmetry c1
}}
set_num_threads({pal})
memory {mem} MB
{basis}
{to_set}
{pcm}
{method}
{fchk}
"""
)
def get_fchk_str(self):
fchk_str = ""
if self.write_fchk:
fchk_fn = self.make_fn("wfn.fchk")
fchk_str = (
"fchk_writer = psi4.FCHKWriter(wfn)\n" f"fchk_writer.write('{fchk_fn}')"
)
return fchk_str
def prepare_input(self, atoms, coords, calc_type):
xyz = self.prepare_coords(atoms, coords)
calc_types = {
"energy": "E, wfn = energy('{}', return_wfn=True)",
# Right now we don't need the wavefunction
# "grad": "G, wfn = gradient('{}', return_wfn=True)\n" \
# "G_arr = np.array(G)\n" \
# "np.save('grad', G_arr)",
# "hessian": "H, wfn = hessian('{}', return_wfn=True)\n" \
# "H_arr = np.array(H)\n" \
# "np.save('hessian', H_arr)",
"grad": "G, wfn = gradient('{}', return_wfn=True)\n"
"G_arr = np.array(G)\n"
"np.save('grad', G_arr)",
"hessian": "H, wfn = hessian('{}', return_wfn=True)\n"
"H_arr = np.array(H)\n"
"np.save('hessian', H_arr)",
}
method = calc_types[calc_type].format(self.method)
wfn_path = self.make_fn("wfn.npy")
method += f"\nWavefunction.to_file(wfn, '{wfn_path}')"
method += "\nprint('PARSE ENERGY:', wfn.energy())"
set_strs = [f"set {key} {value}" for key, value in self.to_set.items()]
set_strs = "\n".join(set_strs)
# Basis section
basis = self.basis
# Construct more complex basis input
if isinstance(basis, dict):
# Check if a global basis is given for all atoms. This must come
# first, otherwise Psi4 throws an error.
basis_lines = [
"basis {",
]
try:
basis_lines.append(f"assign {basis['assign']}")
except KeyError:
pass
# Add remaining lines
basis_lines.extend(
[
f"assign {atms} {bas}"
for atms, bas in basis.items()
if atms != "assign"
]
)
basis_lines.append("}")
basis = "\n".join(basis_lines)
# Use set when self.basis is a string
else:
basis = f"set basis {basis}"
# PCM section
pcm = ""
if self.solvent:
pcm = textwrap.dedent(
f"""
set pcm true
pcm = {{
Medium {{
SolverType = {self.pcm}
Solvent = {self.solvent}
}}
Cavity {{
Type = GePol
}}
}}
"""
)
inp = self.inp.format(
xyz=xyz,
charge=self.charge,
mult=self.mult,
basis=basis,
to_set=set_strs,
pcm=pcm,
method=method,
pal=self.pal,
mem=self.mem,
fchk=self.get_fchk_str(),
)
# inp = "\n".join([line.strip() for line in inp.split("\n")])
return inp
def get_energy(self, atoms, coords):
calc_type = "energy"
inp = self.prepare_input(atoms, coords, calc_type)
results = self.run(inp, calc="energy")
return results
def get_forces(self, atoms, coords):
calc_type = "grad"
inp = self.prepare_input(atoms, coords, calc_type)
results = self.run(inp, calc="grad")
return results
def get_hessian(self, atoms, coords):
calc_type = "hessian"
inp = self.prepare_input(atoms, coords, calc_type)
results = self.run(inp, calc="hessian")
return results
def run_calculation(self, atoms, coords):
return self.get_energy(atoms, coords)
def parse_energy(self, path):
with open(path / "psi4.out") as handle:
text = handle.read()
en_regex = re.compile(r"PARSE ENERGY: ([\d\-\.]+)")
mobj = en_regex.search(text)
result = {"energy": float(mobj[1])}
return result
def parse_grad(self, path):
gradient = np.load(path / "grad.npy")
forces = -gradient.flatten()
result = {
"forces": forces,
}
result.update(self.parse_energy(path))
return result
def parse_hessian(self, path):
hessian = np.load(path / "hessian.npy")
result = {
"hessian": hessian,
}
result.update(self.parse_energy(path))
return result
def __str__(self):
return f"Psi4({self.name})"
| eljost/pysisyphus | pysisyphus/calculators/Psi4.py | Python | gpl-3.0 | 5,903 | [
"Psi4"
] | c60693ae4a0f8a8a71096aadbbc987f74198d8919845ad9784f64bc519a3a9dc |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Build the ESPResSo logo with particles.
"""
import math
import numpy as np
import espressomd
espressomd.assert_features(["WCA", "MASS"])
import espressomd.shapes
from espressomd.visualization_opengl import openGLLive
box_l = 50
system = espressomd.System(box_l=[box_l, 15, box_l])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(seed=system.seed)
yoff = 3
# cup
cup_top_circ = 21
cup_bot_circ = 15
cup_height = 6
for i in range(cup_height):
circ = cup_bot_circ + i * \
(cup_top_circ - cup_bot_circ) / float(cup_height - 1)
rad = circ / (2.0 * np.pi)
alpha = 2.0 * np.pi / int(circ)
posy = yoff + i
for j in range(int(circ)):
posx = box_l / 2.0 + rad * math.sin(j * alpha + (np.pi / 2.0))
posz = box_l / 2.0 + rad * math.cos(j * alpha + (np.pi / 2.0))
system.part.add(pos=[posx, posy, posz], type=0)
# cup bottom
rad = cup_bot_circ / (2.0 * np.pi)
posy = yoff
while rad > 1.0:
rad -= 0.9
circ = 2.0 * np.pi * rad
alpha = 2.0 * np.pi / int(circ)
for j in range(int(circ)):
posx = box_l / 2.0 + rad * math.sin(j * alpha + (np.pi / 2.0))
posz = box_l / 2.0 + rad * math.cos(j * alpha + (np.pi / 2.0))
system.part.add(pos=[posx, posy, posz], type=0)
# cup handle
hand_rad = (cup_height - 4.0) / math.sqrt(2.0)
hand_circ = (1.5 * np.pi * hand_rad)
hand_xoff = (cup_bot_circ + cup_top_circ) / (4.0 * np.pi) + 1.2
hand_yoff = yoff + cup_height / 2.0 - 0.2
alpha = 2.0 * np.pi / int(4.0 * hand_circ / 3.0)
beta = math.sin((cup_top_circ - cup_bot_circ) / (2.0 * np.pi * cup_height - 1))
beta = beta - np.pi / 8.0
posz = (box_l / 2.0) + 0.5
for i in range(int(hand_circ)):
posx = hand_xoff + box_l / 2.0 + hand_rad * math.sin(i * alpha + beta)
posy = hand_yoff + hand_rad * math.cos(i * alpha + beta)
system.part.add(pos=[posx, posy, posz], type=0)
# saucer
saucer_circ = 30
s_rad_o = saucer_circ / (2.0 * np.pi)
s_rad_i = cup_bot_circ / (2.0 * np.pi)
n_saucer = int(s_rad_o - s_rad_i) + 1
n_ci = 0
for i in range(n_saucer):
n_ci += int(saucer_circ - (i * 2.0 * np.pi))
ci_val = -len(system.part) / float(n_ci)
for i in range(n_saucer):
rad = s_rad_o - i
alpha = 2.0 * np.pi / int(saucer_circ - (i * 2.0 * np.pi))
posy = yoff + 0.3 - 0.5 * i
for j in range(int(saucer_circ - (i * 2.0 * np.pi))):
posx = box_l / 2.0 + rad * math.sin(j * alpha)
posz = box_l / 2.0 + rad * math.cos(j * alpha)
system.part.add(pos=[posx, posy, posz], type=1)
# python
n_pbody = 12
posy = 3.5
posz = box_l / 2.0
diam = 0.8
mass = 0.01
fl = -1
harm = espressomd.interactions.HarmonicBond(k=400.0, r_0=diam, r_cut=5.0)
system.bonded_inter.add(harm)
for i in range(n_pbody):
posx = i * diam
system.part.add(pos=[posx, posy, posz], type=2, mass=mass)
pid = len(system.part) - 1
if i > 0:
system.part[pid].bonds = (harm, pid - 1)
if i % 3 == 0:
fl *= -1
system.part[pid].ext_force = [0, fl * 40 * mass, 0]
if i >= n_pbody - 3:
system.part[pid].ext_force = [50.0 * mass, 0, 0]
elif i == 0:
system.part[pid].ext_force = [-20 * mass, 0, 0]
# steam
fene = espressomd.interactions.FeneBond(k=15.1, d_r_max=2.0, r_0=0.1)
system.bonded_inter.add(fene)
n_steam = 6
l_steam = 12
rad = (cup_top_circ - 12.5) / (2.0 * np.pi)
alpha = 2.0 * np.pi / int(n_steam)
for i in range(n_steam):
for j in range(l_steam):
posx = box_l / 2.0 + rad * math.sin(i * alpha + j * 0.6)
posz = box_l / 2.0 + rad * math.cos(i * alpha + j * 0.6)
posy = yoff + 2 + j * 0.1 * rad
system.part.add(pos=[posx, posy, posz], type=3)
pid = len(system.part) - 1
if j == 0:
system.part[pid].fix = [1, 1, 1]
else:
system.part[pid].bonds = (fene, pid - 1)
if j == l_steam - 1:
system.part[pid].ext_force = [0, 7.0, 0]
# stand
system.constraints.add(
shape=espressomd.shapes.Cylinder(
center=[box_l / 2.0, 1.0, box_l / 2.0],
axis=[0, 1, 0],
direction=1,
radius=7.5,
length=1),
particle_type=0,
penetrable=True)
system.time_step = 0.00022
system.cell_system.skin = 0.4
system.thermostat.set_langevin(kT=0.0, gamma=0.02, seed=42)
wca_eps = 1.0
wca_sig = 0.7
for i in range(2):
for j in range(i, 2):
system.non_bonded_inter[i, j].wca.set_params(
epsilon=wca_eps, sigma=wca_sig)
wca_eps = 1.0
wca_sig = 1.0
for i in range(3):
system.non_bonded_inter[i, 2].wca.set_params(
epsilon=wca_eps, sigma=wca_sig)
visualizer = openGLLive(
system,
background_color=[0.2, 0.2, 0.3],
camera_position=[box_l / 2.0, box_l / 4.0, 20 * 3],
particle_sizes=[0.6, 0.75, 0.9, 0.2],
particle_type_materials=['bright', 'bright', 'plastic', 'chrome'],
particle_type_colors=[[0.2, 0.2, 0.8, 1],
[0.8, 0.2, 0.2, 1],
[1, 1, 1, 1],
[0.8, 0.8, 0.8, 1]],
bond_type_materials=['chrome'],
bond_type_colors=[[0.2, 0.2, 0.2, 0.5]],
bond_type_radius=[0.1],
constraint_type_colors=[[1, 1, 1, 0.5]],
constraint_type_materials=['chrome'],
spotlight_brightness=5.0,
spotlight_focus=100,
spotlight_angle=60,
light_brightness=1.0,
ext_force_arrows=False,
draw_axis=False,
draw_box=False,
drag_enabled=True)
def rotate():
visualizer.camera.rotateSystemXL()
# visualizer.registerCallback(rotate, interval = 16)
visualizer.run(1)
| psci2195/espresso-ffans | samples/espresso_logo.py | Python | gpl-3.0 | 6,271 | [
"ESPResSo"
] | 09d1fc374cef7d72019d540de165a2ded69d9db4f9c557467ef87b7a4b617ca1 |
from __future__ import with_statement
import time
try:
import simplejson as json
except ImportError:
import json
import logging
from tornado.web import HTTPError
from octopus.core.communication import HttpResponse, Http400, Http404, Http403, HttpConflict, Http500
from octopus.core.enums.rendernode import *
from octopus.core import enums, singletonstats, singletonconfig
from octopus.core.framework import ResourceNotFoundError
from octopus.dispatcher.model import RenderNode
from octopus.dispatcher.model.filter.rendernode import IFilterRenderNode
from octopus.dispatcher.webservice import DispatcherBaseResource
from puliclient.model.renderNode import RenderNode as RenderNodeModel
logger = logging.getLogger("main.dispatcher")
class RenderNodeNotFoundError(ResourceNotFoundError):
"""
Raised when a request is sent for a node that is not a attached to root.
"""
def __init__(self, node, *args, **kwargs):
ResourceNotFoundError.__init__(self, node=node, *args, **kwargs)
class RenderNodesResource(DispatcherBaseResource):
"""
Lists the render nodes known by the dispatcher.
:param: request the HTTP request
"""
def get(self):
rendernodes = self.getDispatchTree().renderNodes.values()
content = {'rendernodes': list(rendernode.to_json() for rendernode in rendernodes)}
content = json.dumps(content)
self.writeCallback(content)
class RenderNodeResource(DispatcherBaseResource):
## Sends the JSON detailed representation of a given render node, url: http://server:8004/rendernodes/<rn:port>
#
# @param request the HTTP request object for this request
# @param computerName the name of the requested render node
#
def get(self, computerName):
computerName = computerName.lower()
try:
rendernode = self.getDispatchTree().renderNodes[computerName]
except KeyError:
return Http404("RenderNode not found")
content = rendernode.to_json()
content = json.dumps(content)
self.writeCallback(content)
def post(self, computerName):
"""
A worker send a request to get registered on the server.
"""
if singletonconfig.get('CORE', 'GET_STATS'):
singletonstats.theStats.cycleCounts['add_rns'] += 1
computerName = computerName.lower()
if computerName.startswith(('1', '2')):
return Http403(message="Cannot register a RenderNode without a name", content="Cannot register a RenderNode without a name")
dct = self.getBodyAsJSON()
if computerName in self.getDispatchTree().renderNodes:
# When the registering worker is already listed in RN list
logger.warning("RenderNode already registered: %s" % computerName)
existingRN = self.getDispatchTree().renderNodes[computerName]
if 'commands' not in dct:
# No commands in current RN, reset command that might be still assigned to this RN
existingRN.reset()
else:
logger.warning("Reset commands that are assigned to this RN: %r" % dct.get('commands', '-'))
for cmdId in dct['commands']:
existingRN.commands[cmdId] = self.getDispatchTree().commands[cmdId]
if 'status' in dct:
existingRN.status = int(dct['status'])
return HttpResponse(304, "RenderNode already registered.")
else:
# Add a new worker (and set infos given in request body)
for key in ('name', 'port', 'status', 'cores', 'speed', 'ram', 'pools', 'caracteristics'):
if not key in dct:
return Http400("Missing key %r" % key, content="Missing key %r" % key)
port = int(dct['port'])
status = int(dct['status'])
if status not in (RN_UNKNOWN, RN_PAUSED, RN_IDLE, RN_BOOTING):
# FIXME: CONFLICT is not a good value maybe
return HttpConflict("Unallowed status for RenderNode registration")
cores = int(dct['cores'])
speed = float(dct['speed'])
ram = int(dct['ram'])
pools = dct['pools']
caracteristics = dct['caracteristics']
name, port = computerName.split(":", 1)
puliversion = dct.get('puliversion', "unknown")
createDate = dct.get('createDate', time.time())
renderNode = RenderNode(None, computerName, cores, speed, name, port, ram, caracteristics, puliversion=puliversion, createDate=createDate)
renderNode.status = status
poolList = []
# check the existence of the pools
for poolName in pools:
try:
pool = self.getDispatchTree().pools[poolName]
poolList.append(pool)
except KeyError:
return HttpConflict("Pool %s is not a registered pool", poolName)
# add the rendernode to the pools
for pool in poolList:
pool.addRenderNode(renderNode)
# add the rendernode to the list of rendernodes
renderNode.pools = poolList
self.getDispatchTree().renderNodes[renderNode.name] = renderNode
self.writeCallback(json.dumps(renderNode.to_json()))
#@queue
def put(self, computerName):
computerName = computerName.lower()
try:
renderNode = self.getDispatchTree().renderNodes[computerName]
except KeyError:
return Http404("RenderNode %s not found" % computerName)
dct = self.getBodyAsJSON()
for key in dct:
if key == "cores":
renderNode.coresNumber = int(dct["cores"])
elif key == "speed":
renderNode.speed = float(dct["speed"])
elif key == "ram":
renderNode.ramSize = int(dct["ram"])
else:
return Http403("Modifying %r attribute is not authorized." % key)
self.writeCallback(json.dumps(renderNode.to_json()))
# Removes a RenderNode from the dispatchTree and all pools.
# Also call RN's reset method to remove assigned commands.
#
# @param request the HTTP request object for this request
# @param computerName the name of the requested render node
#
#@fqdn_request_decorator
#@queue
def delete(self, computerName):
computerName = computerName.lower()
try:
renderNode = self.getDispatchTree().renderNodes[computerName]
except KeyError:
return Http404("RenderNode not found")
if renderNode.status in [RN_ASSIGNED, RN_WORKING]:
renderNode.reset()
for pool in self.getDispatchTree().pools.values():
pool.removeRenderNode(renderNode)
renderNode.remove()
class RenderNodeCommandsResource(DispatcherBaseResource):
#@queue
def put(self, computerName, commandId):
'''Update command `commandId` running on rendernode `renderNodeId`.
Returns "200 OK" on success, or "404 Bad Request" if the provided json data is not valid.
'''
if singletonconfig.get('CORE', 'GET_STATS'):
singletonstats.theStats.cycleCounts['update_commands'] += 1
computerName = computerName.lower()
# try:
# updateDict = self.sanitizeUpdateDict(self.getBodyAsJSON())
# except TypeError, e:
# return Http400(repr(e.args))
updateDict = self.getBodyAsJSON()
updateDict['renderNodeName'] = computerName
try:
self.framework.application.updateCommandApply(updateDict)
except (KeyError, IndexError) as e:
raise Http404(str(e))
except Exception, e:
raise Http500("Exception during command update")
self.writeCallback("Command updated")
#@queue
def delete(self, computerName, commandId):
computerName = computerName.lower()
commandId = int(commandId)
try:
computer = self.framework.application.dispatchTree.renderNodes[computerName]
except KeyError:
return HTTPError(404, "No such RenderNode")
try:
command = computer.commands[commandId]
except KeyError:
return HTTPError(404, "No such command running on this RenderNode")
if command.id not in computer.commands:
return HTTPError(400, "Command %d not running on RenderNode %s" % (command.id, computer.name))
else:
if enums.command.isFinalStatus(command.status):
if enums.command.CMD_DONE == command.status:
command.completion = 1.0
command.finish()
msg = "Command %d removed successfully." % commandId
self.writeCallback(msg)
else:
# command.cancel() ??? dans ce cas c'est pas ce qu'on devrait faire ??? FIXME
message = "Cannot remove a running command from a RenderNode."
return HTTPError(403, message)
class RenderNodeSysInfosResource(DispatcherBaseResource):
#@queue
def put(self, computerName):
computerName = computerName.lower()
rns = self.getDispatchTree().renderNodes
if not computerName in rns:
raise Http404("RenderNode not found")
dct = self.getBodyAsJSON()
renderNode = rns[computerName]
if "puliversion" in dct:
renderNode.puliversion = dct.get('puliversion', "unknown")
if "caracteristics" in dct:
renderNode.caracteristics = eval(str(dct["caracteristics"]))
if "cores" in dct:
renderNode.cores = int(dct["cores"])
if "createDate" in dct:
renderNode.createDate = int(dct["createDate"])
if "ram" in dct:
renderNode.ram = int(dct["ram"])
if "systemFreeRam" in dct:
renderNode.systemFreeRam = int(dct["systemFreeRam"])
if "systemSwapPercentage" in dct:
renderNode.systemSwapPercentage = float(dct["systemSwapPercentage"])
if "speed" in dct:
renderNode.speed = float(dct["speed"])
if "performance" in dct:
renderNode.performance = float(dct["performance"])
if "status" in dct:
if renderNode.status == RN_UNKNOWN:
renderNode.status = int(dct["status"])
logger.info("status reported is %d" % renderNode.status)
# if renderNode.status != int(dct["status"]):
# logger.warning("The status reported by %s = %r is different from the status on dispatcher %r" % (renderNode.name, RN_STATUS_NAMES[dct["status"]],RN_STATUS_NAMES[renderNode.status]))
if "isPaused" in dct and "status" in dct:
logger.debug("reported for %r: remoteStatus=%r remoteIsPaused=%r" % (renderNode.name, RN_STATUS_NAMES[dct["status"]], dct['isPaused']))
renderNode.lastAliveTime = time.time()
renderNode.isRegistered = True
class RenderNodesPerfResource(DispatcherBaseResource):
"""
Sets a performance index (float) for one or several given rendernode names
TOFIX: might not be actually used, need to verify
"""
#@queue
def put(self):
dct = self.getBodyAsJSON()
for computerName, perf in dct.items():
renderNode = self.getDispatchTree().renderNodes[computerName]
renderNode.performance = float(perf)
self.writeCallback("Performance indexes have been set.")
class RenderNodeResetResource(DispatcherBaseResource):
#@queue
def put(self, computerName):
computerName = computerName.lower()
rns = self.getDispatchTree().renderNodes
if not computerName in rns:
return Http404("RenderNode not found")
dct = self.getBodyAsJSON()
renderNode = rns[computerName]
noMoreCmd = int(dct["nomorecmd"])
if noMoreCmd:
renderNode.reset()
class RenderNodeQuarantineResource(DispatcherBaseResource):
def put(self):
"""
Used to set a quarantine on a list of rendernodes. Quarantine rns have a flag "excluded"
that prevent them to be considered in assignement process.
example: curl -d '{"quarantine":true,"rns":["vfxpc64:9005"]}' -X PUT "http://pulitest:8004/rendernodes/quarantine/"
"""
dct = self.getBodyAsJSON()
quarantine = dct["quarantine"]
rns = self.getDispatchTree().renderNodes
for computerName in dct["rns"]:
if computerName not in rns:
logger.warning("following RN '%s' is not referenced, ignoring..." % computerName)
continue
renderNode = rns[computerName]
renderNode.excluded = quarantine
if not quarantine:
renderNode.history.clear()
renderNode.tasksHistory.clear()
logging.getLogger("main.dispatcher.webservice").info("Rendernode quarantine state changed: %s -> quarantine=%s" % (computerName, quarantine))
self.writeCallback("Quarantine attributes set.")
class RenderNodePausedResource(DispatcherBaseResource):
#@queue
def put(self, computerName):
dct = self.getBodyAsJSON()
paused = dct['paused']
killproc = dct['killproc']
computerName = computerName.lower()
rns = self.getDispatchTree().renderNodes
if not computerName in rns:
return Http404("RenderNode not found")
renderNode = rns[computerName]
if paused:
renderNode.status = RN_PAUSED
if killproc:
renderNode.reset(paused=True)
else:
# FIXME maybe set this to RN_FINISHING ?
renderNode.status = RN_IDLE
renderNode.excluded = False
class RenderNodeQueryResource(DispatcherBaseResource, IFilterRenderNode):
def createRenderNodeRepr(self, pNode):
"""
Create a json representation for a given node.
param: render node to explore
return: puliclient.model.rendernode object (which is serializable)
"""
newData = RenderNodeModel()
newData.createFromNode(pNode)
return newData
def post(self):
"""
"""
self.logger = logging.getLogger('main.query')
filters = self.getBodyAsJSON()
self.logger.debug('filters: %s' % filters)
try:
start_time = time.time()
resultData = []
renderNodes = self.getDispatchTree().renderNodes.values()
totalNodes = len(renderNodes)
#
# --- filtering
#
filteredNodes = self.match(filters, renderNodes)
#
# --- Prepare the result json object
#
for currNode in filteredNodes:
tmp = self.createRenderNodeRepr(currNode)
resultData.append(tmp.encode())
content = {
'summary': {
'count': len(filteredNodes),
'totalInDispatcher': totalNodes,
'requestTime': time.time() - start_time,
'requestDate': time.ctime()
},
'items': resultData
}
# Create response and callback
self.writeCallback(json.dumps(content))
except KeyError:
raise Http404('Error unknown key')
except HTTPError, e:
raise e
except Exception, e:
raise HTTPError(500, "Impossible to retrieve render nodes (%s)" % e)
| mikrosimage/OpenRenderManagement | src/octopus/dispatcher/webservice/rendernodes.py | Python | bsd-3-clause | 15,694 | [
"Octopus"
] | ab766893f5995e5dec52a38f096f8341fb88fb39750ba85dae45662c05583cf5 |
from unittest import TestCase
import plotly.io as pio
import subprocess
import os
from distutils.version import LooseVersion
import requests
import time
import psutil
import pytest
import plotly.graph_objects as go
# Fixtures
# --------
from plotly.io._orca import find_open_port, which, orca_env
@pytest.fixture()
def setup():
# Set problematic environment variables
os.environ["NODE_OPTIONS"] = "--max-old-space-size=4096"
os.environ["ELECTRON_RUN_AS_NODE"] = "1"
# Reset orca state
pio.orca.reset_status()
pio.orca.config.restore_defaults()
# Run setup before every test function in this file
pytestmark = pytest.mark.usefixtures("setup")
# Utilities
# ---------
def ping_pongs(server_url):
try:
response = requests.post(server_url + "/ping")
except requests.exceptions.ConnectionError:
# Expected
return False
return response.status_code == 200 and response.content.decode("utf-8") == "pong"
def test_validate_orca():
assert pio.orca.status.state == "unvalidated"
pio.orca.validate_executable()
assert pio.orca.status.state == "validated"
def test_orca_not_found():
pio.orca.config.executable = "bogus"
with pytest.raises(ValueError) as err:
pio.orca.validate_executable()
assert pio.orca.status.state == "unvalidated"
assert "could not be found" in str(err.value)
def test_invalid_executable_found():
pio.orca.config.executable = "python"
with pytest.raises(ValueError) as err:
pio.orca.validate_executable()
assert pio.orca.status.state == "unvalidated"
assert "executable that was found at" in str(err.value)
def test_orca_executable_path():
assert pio.orca.status.executable is None
if os.name == "nt": # Windows
expected = subprocess.check_output(["where", "orca"]).decode("utf-8").strip()
else: # Linux / OS X
expected = subprocess.check_output(["which", "orca"]).decode("utf-8").strip()
pio.orca.validate_executable()
assert pio.orca.status.executable == expected
def test_orca_version_number():
assert pio.orca.status.version is None
expected_min = LooseVersion("1.1.0")
expected_max = LooseVersion("2.0.0")
pio.orca.validate_executable()
version = LooseVersion(pio.orca.status.version)
assert expected_min <= version
assert version < expected_max
def test_ensure_orca_ping_and_proc():
pio.orca.config.timeout = None
assert pio.orca.status.port is None
assert pio.orca.status.pid is None
pio.orca.ensure_server()
assert pio.orca.status.port is not None
assert pio.orca.status.pid is not None
server_port = pio.orca.status.port
server_pid = pio.orca.status.pid
# Make sure server has time to start up
time.sleep(10)
# Check that server process number is valid
assert psutil.pid_exists(server_pid)
# Build server URL
server_url = "http://localhost:%s" % server_port
# ping server
assert ping_pongs(server_url)
# shut down server
pio.orca.shutdown_server()
# Check that server process number no longer exists
assert not psutil.pid_exists(server_pid)
# Check that ping is no longer answered
assert not ping_pongs(server_url)
def test_server_timeout_shutdown():
# Configure server to shutdown after 10 seconds without
# calls to ensure_orca_server
pio.orca.config.timeout = 10
pio.orca.ensure_server()
server_port = pio.orca.status.port
server_pid = pio.orca.status.pid
# Build server URL
server_url = "http://localhost:%s" % server_port
# Check that server process number is valid
assert psutil.pid_exists(server_pid)
for i in range(3):
# Sleep for just under 10 seconds
time.sleep(8)
assert ping_pongs(server_url)
assert psutil.pid_exists(server_pid)
pio.orca.ensure_server()
# Sleep just over 10 seconds, server should then auto shutdown
time.sleep(11)
# Check that server process number no longer exists
assert not psutil.pid_exists(server_pid)
# Check that ping is no longer answered
assert not ping_pongs(server_url)
def test_external_server_url():
# Build server url
port = find_open_port()
server_url = "http://{hostname}:{port}".format(hostname="localhost", port=port)
# Build external orca command
orca_path = which("orca")
cmd_list = [orca_path] + [
"serve",
"-p",
str(port),
"--plotly",
pio.orca.config.plotlyjs,
"--graph-only",
]
# Run orca as subprocess to simulate external orca server
DEVNULL = open(os.devnull, "wb")
with orca_env():
proc = subprocess.Popen(cmd_list, stdout=DEVNULL)
# Start plotly managed orca server so we can ensure it gets shut down properly
pio.orca.config.port = port
pio.orca.ensure_server()
assert pio.orca.status.state == "running"
# Configure orca to use external server
pio.orca.config.server_url = server_url
# Make sure that the locally managed orca server has been shutdown and the local
# config options have been cleared
assert pio.orca.status.state == "unvalidated"
assert pio.orca.config.port is None
fig = go.Figure()
img_bytes = pio.to_image(fig, format="svg")
assert img_bytes.startswith(b"<svg class")
# Kill server orca process
proc.terminate()
| plotly/python-api | packages/python/plotly/plotly/tests/test_orca/test_orca_server.py | Python | mit | 5,402 | [
"ORCA"
] | 681c1f89af265909475285c4adbedbade8710bb4d28b87e1f8d108fe233566e9 |
# Copyright 2004-2012 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains support for state-machine controlled animations.
import renpy.display
import random
class State(object):
"""
This creates a state that can be used in a SMAnimation.
"""
def __init__(self, name, image, *atlist, **properties):
"""
@param name: A string giving the name of this state.
@param image: The displayable that is shown to the user while
we are in (entering) this state. For convenience, this can
also be a string or tuple, which is interpreted with Image.
image should be None when this State is used with motion,
to indicate that the image will be replaced with the child of
the motion.
@param atlist: A list of functions to call on the image. (In
general, if something can be used in an at clause, it can be
used here as well.)
If any keyword arguments are given, they are used to construct a
Position object, that modifies the position of the image.
"""
if image and not isinstance(image, renpy.display.core.Displayable):
image = renpy.easy.displayable(image)
self.name = name
self.image = image
self.atlist = atlist
self.properties = properties
def add(self, sma):
sma.states[self.name] = self
def get_image(self):
rv = self.image
for i in self.atlist:
rv = i(rv)
if self.properties:
rv = renpy.display.layout.Position(rv, **self.properties)
return rv
def motion_copy(self, child):
if self.image is not None:
child = self.image
return State(self.name, child, *self.atlist)
class Edge(object):
"""
This creates an edge that can be used with a SMAnimation.
"""
def __init__(self, old, delay, new, trans=None, prob=1):
"""
@param old: The name (a string) of the state that this transition is from.
@param delay: The number of seconds that this transition takes.
@param new: The name (a string) of the state that this transition is to.
@param trans: The transition that will be used to show the
image found in the new state. If None, the image is show
immediately.
When used with an SMMotion, the transition should probably be
move.
@param prob: The number of times this edge is added. This can
be used to make a transition more probable then others. For
example, if one transition out of a state has prob=5, and the
other has prob=1, then the one with prob=5 will execute 5/6 of
the time, while the one with prob=1 will only occur 1/6 of the
time. (Don't make this too large, as memory use is proportional to
this value.)
"""
self.old = old
self.delay = delay
self.new = new
self.trans = trans
self.prob = prob
def add(self, sma):
for _i in xrange(0, self.prob):
sma.edges.setdefault(self.old, []).append(self)
class SMAnimation(renpy.display.core.Displayable):
"""
This creates a state-machine animation. Such an animation is
created by randomly traversing the edges between states in a
defined state machine. Each state corresponds to an image shown to
the user, with the edges corresponding to the amount of time an
image is shown, and the transition it is shown with.
Images are shown, perhaps with a transition, when we are
transitioning into a state containing that image.
"""
def __init__(self, initial, *args, **properties):
"""
@param initial: The name (a string) of the initial state we
start in.
@param showold: If the keyword parameter showold is True, then
the old image is shown instead of the new image when in an
edge.
@param anim_timebase: If True, we use the animation
timebase. If False, we use the displayable timebase.
This accepts as additional arguments the anim.State and
anim.Edge objects that are used to make up this state
machine.
"""
if 'delay' in properties:
self.delay = properties['delay']
del properties['delay']
else:
self.delay = None
if 'showold' in properties:
self.showold = properties['showold']
del properties['showold']
else:
self.showold = False
if 'anim_timebase' in properties:
self.anim_timebase = properties['anim_timebase']
del properties['anim_timebase']
else:
self.anim_timebase = True
super(SMAnimation, self).__init__(**properties)
self.properties = properties
# The initial state.
self.initial = initial
# A map from state name to State object.
self.states = { }
# A map from state name to list of Edge objects.
self.edges = { }
for i in args:
i.add(self)
# The time at which the current edge started. If None, will be
# set to st by render.
self.edge_start = None
# A cache for what the current edge looks like when rendered.
self.edge_cache = None
# The current edge.
self.edge = None
# The state we're in.
self.state = None
def visit(self):
return [ i.image for i in self.states.itervalues() ]
def pick_edge(self, state):
"""
This randomly picks an edge out of the given state, if
one exists. It updates self.edge if a transition has
been selected, or returns None if none can be found. It also
updates self.image to be the new image on the selected edge.
"""
if state not in self.edges:
self.edge = None
return
edges = self.edges[state]
self.edge = random.choice(edges)
self.state = self.edge.new
def update_cache(self):
"""
Places the correct Displayable into the edge cache, based on
what is contained in the given edge. This takes into account
the old and new states, and any transition that is present.
"""
if self.edge.trans:
im = self.edge.trans(old_widget=self.states[self.edge.old].get_image(),
new_widget=self.states[self.edge.new].get_image())
elif self.showold:
im = self.states[self.edge.old].get_image()
else:
im = self.states[self.edge.new].get_image()
self.edge_cache = im
def get_placement(self):
if self.edge_cache:
return self.edge_cache.get_placement()
if self.state:
return self.states[self.state].get_image().get_placement()
return super(SMAnimation, self).get_placement()
def render(self, width, height, st, at):
if self.anim_timebase:
t = at
else:
t = st
if self.edge_start is None or t < self.edge_start:
self.edge_start = t
self.edge_cache = None
self.pick_edge(self.initial)
while self.edge and t > self.edge_start + self.edge.delay:
self.edge_start += self.edge.delay
self.edge_cache = None
self.pick_edge(self.edge.new)
# If edge is None, then we have a permanent, static picture. Deal
# with that.
if not self.edge:
im = renpy.display.render.render(self.states[self.state].get_image(),
width, height,
st - self.edge_start, at)
# Otherwise, we have another edge.
else:
if not self.edge_cache:
self.update_cache()
im = renpy.display.render.render(self.edge_cache, width, height, t - self.edge_start, at)
if not renpy.game.less_updates:
renpy.display.render.redraw(self.edge_cache, self.edge.delay - (t - self.edge_start))
iw, ih = im.get_size()
rv = renpy.display.render.Render(iw, ih)
rv.blit(im, (0, 0))
return rv
def __call__(self, child=None, new_widget=None, old_widget=None):
    """
    Used when this SMAnimation is used as a SMMotion. This creates
    a duplicate of the animation, with all states containing None
    as the image having that None replaced with the image that is provided here.
    """
    # SMMotion protocol passes new_widget; treat it as the child.
    if child is None:
        child = new_widget
    args = [ ]
    # NOTE: dict.itervalues() — this file targets Python 2.
    for state in self.states.itervalues():
        args.append(state.motion_copy(child))
    for edges in self.edges.itervalues():
        args.extend(edges)
    # Rebuild an equivalent SMAnimation from the copied states/edges.
    return SMAnimation(self.initial, delay=self.delay, *args, **self.properties)
# class Animation(renpy.display.core.Displayable):
# """
# A Displayable that draws an animation, which is a series of images
# that are displayed with time delays between them.
# """
# def __init__(self, *args, **properties):
# """
# Odd (first, third, fifth, etc.) arguments to Animation are
# interpreted as image filenames, while even arguments are the
# time to delay between each image. If the number of arguments
# is odd, the animation will stop with the last image (well,
# actually delay for a year before looping). Otherwise, the
# animation will restart after the final delay time.
# @param anim_timebase: If True, the default, use the animation
# timebase. Otherwise, use the displayable timebase.
# """
# properties.setdefault('style', 'animation')
# self.anim_timebase = properties.pop('anim_timebase', True)
# super(Animation, self).__init__(**properties)
# self.images = [ ]
# self.delays = [ ]
# for i, arg in enumerate(args):
# if i % 2 == 0:
# self.images.append(renpy.easy.displayable(arg))
# else:
# self.delays.append(arg)
# if len(self.images) > len(self.delays):
# self.delays.append(365.25 * 86400.0) # One year, give or take.
# def render(self, width, height, st, at):
# if self.anim_timebase:
# t = at % sum(self.delays)
# else:
# t = st % sum(self.delays)
# for image, delay in zip(self.images, self.delays):
# if t < delay:
# renpy.display.render.redraw(self, delay - t)
# im = renpy.display.render.render(image, width, height, t, at)
# width, height = im.get_size()
# rv = renpy.display.render.Render(width, height)
# rv.blit(im, (0, 0))
# return rv
# else:
# t = t - delay
# def visit(self):
# return self.images
def Animation(*args, **kwargs):
    """
    Backwards-compatible Animation constructor: rewrites the old
    (image, delay, image, delay, ...) argument list into the
    (image, delay, transition, ...) form expected by
    TransitionAnimation, inserting None for every transition.
    """
    expanded = [ ]
    for index, arg in enumerate(args):
        expanded.append(arg)
        # After each delay (odd index), insert a null transition.
        if index % 2 == 1:
            expanded.append(None)
    return TransitionAnimation(*expanded, **kwargs)
class TransitionAnimation(renpy.display.core.Displayable):
    """
    A displayable that draws an animation with each frame separated
    by a transition.
    """

    def __init__(self, *args, **properties):
        """
        This takes arguments such that the 1st, 4th, 7th, ...
        arguments are displayables, the 2nd, 5th, 8th, ... arguments
        are times, and the 3rd, 6th, 9th, ... are transitions.
        This displays the first displayable for the given time, then
        transitions to the second displayable using the given
        transition, and shows it for the given time (the time of the
        transition is taken out of the time the frame is shown), and
        so on.
        The last argument may be a transition (in which case that
        transition is used when returning to the first frame), or
        a displayable (which is shown forever).
        There is one keyword argument, apart from the style properties:
        @param anim_timebase: If True, the default, use the animation
        timebase. Otherwise, use the displayable timebase.
        """
        properties.setdefault('style', 'animation')
        self.anim_timebase = properties.pop('anim_timebase', True)
        super(TransitionAnimation, self).__init__(**properties)
        images = [ ]
        delays = [ ]
        transitions = [ ]
        # Split the flat argument list into parallel image, delay and
        # transition lists.
        for i, arg in enumerate(args):
            if i % 3 == 0:
                images.append(renpy.easy.displayable(arg))
            elif i % 3 == 1:
                delays.append(arg)
            else:
                transitions.append(arg)
        # If the final frame has no delay, show it (nearly) forever.
        if len(images) > len(delays):
            delays.append(365.25 * 86400.0) # One year, give or take.
        if len(images) > len(transitions):
            transitions.append(None)
        self.images = images
        # prev_images[i] and transitions[i] describe how frame i is
        # entered: from the previous frame (wrapping around), via the
        # transition that textually precedes it (rotated by one).
        self.prev_images = [ images[-1] ] + images[:-1]
        self.delays = delays
        self.transitions = [ transitions[-1] ] + transitions[:-1]

    def render(self, width, height, st, at):
        # Pick the timebase, then wrap time into one animation cycle.
        if self.anim_timebase:
            orig_t = at
        else:
            orig_t = st
        t = orig_t % sum(self.delays)
        # Walk the frames, subtracting each delay until t falls inside
        # the current frame's window.
        for image, prev, delay, trans in zip(self.images, self.prev_images, self.delays, self.transitions):
            if t < delay:
                if not renpy.game.less_updates:
                    renpy.display.render.redraw(self, delay - t)
                # Apply the entering transition, except during the very
                # first pass through the first frame.
                if trans and orig_t >= self.delays[0]:
                    image = trans(old_widget=prev, new_widget=image)
                im = renpy.display.render.render(image, width, height, t, at)
                width, height = im.get_size()
                rv = renpy.display.render.Render(width, height)
                rv.blit(im, (0, 0))
                return rv
            else:
                t = t - delay

    def visit(self):
        # All frames participate in prediction/visiting.
        return self.images
class Blink(renpy.display.core.Displayable):
    """
    Blinks a child displayable by cyclically varying its alpha
    through the phases on -> set -> off -> rise. See __init__ for
    the phase parameters.
    """

    def __init__(self, image, on=0.5, off=0.5, rise=0.5, set=0.5, #@ReservedAssignment
                 high=1.0, low=0.0, offset=0.0, anim_timebase=False, **properties):
        """
        This takes as an argument an image or widget, and blinks that image
        by varying its alpha. The sequence of phases is
        on - set - off - rise - on - ... All times are given in seconds, all
        alphas are fractions between 0 and 1.
        @param image: The image or widget that will be blinked.
        @param on: The amount of time the widget spends on, at high alpha.
        @param off: The amount of time the widget spends off, at low alpha.
        @param rise: The amount time the widget takes to ramp from low to high alpha.
        @param set: The amount of time the widget takes to ram from high to low.
        @param high: The high alpha.
        @param low: The low alpha.
        @param offset: A time offset, in seconds. Use this to have a
        blink that does not start at the start of the on phase.
        @param anim_timebase: If True, use the animation timebase, if false, the displayable timebase.
        """
        super(Blink, self).__init__(**properties)
        self.image = renpy.easy.displayable(image)
        self.on = on
        self.off = off
        self.rise = rise
        self.set = set
        self.high = high
        self.low = low
        self.offset = offset
        self.anim_timebase = anim_timebase
        # Total length of one on -> set -> off -> rise cycle.
        self.cycle = on + set + off + rise

    def visit(self):
        return [ self.image ]

    def render(self, height, width, st, at):
        # NOTE(review): the first two parameters are named (height,
        # width) but are forwarded positionally in the order received,
        # so behavior matches a (width, height) signature — the names
        # are merely misleading.
        if self.anim_timebase:
            t = at
        else:
            t = st
        # Position within the current blink cycle.
        time = (self.offset + t) % self.cycle
        alpha = self.high
        # Walk the four phases in order, computing the alpha for this
        # moment and the delay until the next visual change.
        if 0 <= time < self.on:
            delay = self.on - time
            alpha = self.high
        time -= self.on
        if 0 <= time < self.set:
            delay = 0  # ramping: redraw as soon as possible
            frac = time / self.set
            alpha = self.low * frac + self.high * (1.0 - frac)
        time -= self.set
        if 0 <= time < self.off:
            delay = self.off - time
            alpha = self.low
        time -= self.off
        if 0 <= time < self.rise:
            delay = 0  # ramping: redraw as soon as possible
            frac = time / self.rise
            alpha = self.high * frac + self.low * (1.0 - frac)
        rend = renpy.display.render.render(self.image, height, width, st, at)
        w, h = rend.get_size()
        rv = renpy.display.render.Render(w, h)
        rv.blit(rend, (0, 0))
        rv.alpha = alpha
        if not renpy.game.less_updates:
            renpy.display.render.redraw(self, delay)
        return rv
def Filmstrip(image, framesize, gridsize, delay, frames=None, loop=True, **properties):
    """
    Builds an animation from a single image laid out as a grid of
    frames. Frames are taken left-to-right, top-to-bottom, and fed to
    Animation with the given delay between them.
    @param image: The image the frames are cropped from.
    @param framesize: A (width, height) tuple giving the size of
    each of the frames in the animation.
    @param gridsize: A (columns, rows) tuple giving the number of
    columns and rows in the grid.
    @param delay: The delay, in seconds, between frames.
    @param frames: The number of frames in this animation. If None,
    then this defaults to columns * rows frames, that is, taking
    every frame in the grid.
    @param loop: If True, loop at the end of the animation. If False,
    this performs the animation once, and then stops.
    Other keyword arguments are as for anim.SMAnimation.
    """
    frame_w, frame_h = framesize
    cols, rows = gridsize

    if frames is None:
        frames = cols * rows

    # Arguments to Animation: alternating (frame, delay) pairs.
    args = [ ]

    taken = 0
    for cell in range(rows * cols):
        # Row-major order over the grid.
        row, col = divmod(cell, cols)
        args.append(renpy.display.im.Crop(image, col * frame_w, row * frame_h, frame_w, frame_h))
        args.append(delay)
        taken += 1
        if taken == frames:
            break

    # Dropping the trailing delay makes Animation hold the last frame
    # rather than looping.
    if not loop:
        args.pop()

    return Animation(*args, **properties)
| MSEMJEJME/tkot | renpy/display/anim.py | Python | gpl-2.0 | 20,676 | [
"VisIt"
] | 5db435d6c8451dfce29eb90eaee7a26aa7448a3c5e1a5b623e2d2d8a41baecc5 |
import contextlib
import collections
import pickle
import re
import sys
from unittest import TestCase, main, skipUnless, SkipTest, expectedFailure
from copy import copy, deepcopy
from typing import Any, NoReturn
from typing import TypeVar, AnyStr
from typing import T, KT, VT # Not in __all__.
from typing import Union, Optional
from typing import Tuple, List, MutableMapping
from typing import Callable
from typing import Generic, ClassVar
from typing import cast
from typing import get_type_hints
from typing import no_type_check, no_type_check_decorator
from typing import Type
from typing import NewType
from typing import NamedTuple
from typing import IO, TextIO, BinaryIO
from typing import Pattern, Match
import abc
import typing
import weakref
from test import mod_generics_cache
class BaseTestCase(TestCase):
    """TestCase augmented with subclass assertions and a typing-cache reset."""

    def assertIsSubclass(self, cls, class_or_tuple, msg=None):
        """Fail unless cls is a subclass of class_or_tuple."""
        if issubclass(cls, class_or_tuple):
            return
        detail = '%r is not a subclass of %r' % (cls, class_or_tuple)
        if msg is not None:
            detail += ' : %s' % msg
        raise self.failureException(detail)

    def assertNotIsSubclass(self, cls, class_or_tuple, msg=None):
        """Fail if cls is a subclass of class_or_tuple."""
        if not issubclass(cls, class_or_tuple):
            return
        detail = '%r is a subclass of %r' % (cls, class_or_tuple)
        if msg is not None:
            detail += ' : %s' % msg
        raise self.failureException(detail)

    def clear_caches(self):
        """Invoke typing's registered cache-cleanup callbacks."""
        for cleanup in typing._cleanups:
            cleanup()
# Small fixture class hierarchy used throughout the tests below:
# Employee is the root; ManagingFounder exercises multiple inheritance.
class Employee:
    pass

class Manager(Employee):
    pass

class Founder(Employee):
    pass

class ManagingFounder(Manager, Founder):
    pass
class AnyTests(BaseTestCase):
    """Behavior of the typing.Any special form."""

    def test_any_instance_type_error(self):
        # Any rejects isinstance() checks.
        with self.assertRaises(TypeError):
            isinstance(42, Any)

    def test_any_subclass_type_error(self):
        # Any rejects issubclass() checks in either position.
        with self.assertRaises(TypeError):
            issubclass(Employee, Any)
        with self.assertRaises(TypeError):
            issubclass(Any, Employee)

    def test_repr(self):
        self.assertEqual(repr(Any), 'typing.Any')

    def test_errors(self):
        with self.assertRaises(TypeError):
            issubclass(42, Any)
        with self.assertRaises(TypeError):
            Any[int]  # Any is not a generic type.

    def test_cannot_subclass(self):
        # Neither Any nor its implementation type may be subclassed.
        with self.assertRaises(TypeError):
            class A(Any):
                pass
        with self.assertRaises(TypeError):
            class A(type(Any)):
                pass

    def test_cannot_instantiate(self):
        with self.assertRaises(TypeError):
            Any()
        with self.assertRaises(TypeError):
            type(Any)()

    def test_any_works_with_alias(self):
        # These expressions must simply not fail.
        typing.Match[Any]
        typing.Pattern[Any]
        typing.IO[Any]
class NoReturnTests(BaseTestCase):
    """Behavior of the typing.NoReturn special form."""

    def test_noreturn_instance_type_error(self):
        # NoReturn rejects isinstance() checks.
        with self.assertRaises(TypeError):
            isinstance(42, NoReturn)

    def test_noreturn_subclass_type_error(self):
        # NoReturn rejects issubclass() checks in either position.
        with self.assertRaises(TypeError):
            issubclass(Employee, NoReturn)
        with self.assertRaises(TypeError):
            issubclass(NoReturn, Employee)

    def test_repr(self):
        self.assertEqual(repr(NoReturn), 'typing.NoReturn')

    def test_not_generic(self):
        # NoReturn is not subscriptable.
        with self.assertRaises(TypeError):
            NoReturn[int]

    def test_cannot_subclass(self):
        with self.assertRaises(TypeError):
            class A(NoReturn):
                pass
        with self.assertRaises(TypeError):
            class A(type(NoReturn)):
                pass

    def test_cannot_instantiate(self):
        with self.assertRaises(TypeError):
            NoReturn()
        with self.assertRaises(TypeError):
            type(NoReturn)()
class TypeVarTests(BaseTestCase):
    """Construction, identity and restrictions of TypeVar."""

    def test_basic_plain(self):
        T = TypeVar('T')
        # T equals itself.
        self.assertEqual(T, T)
        # T is an instance of TypeVar
        self.assertIsInstance(T, TypeVar)

    def test_typevar_instance_type_error(self):
        T = TypeVar('T')
        with self.assertRaises(TypeError):
            isinstance(42, T)

    def test_typevar_subclass_type_error(self):
        T = TypeVar('T')
        with self.assertRaises(TypeError):
            issubclass(int, T)
        with self.assertRaises(TypeError):
            issubclass(T, int)

    def test_constrained_error(self):
        # A single constraint is rejected; two or more are required.
        with self.assertRaises(TypeError):
            X = TypeVar('X', int)
            X

    def test_union_unique(self):
        X = TypeVar('X')
        Y = TypeVar('Y')
        self.assertNotEqual(X, Y)
        self.assertEqual(Union[X], X)
        self.assertNotEqual(Union[X], Union[X, Y])
        self.assertEqual(Union[X, X], X)
        self.assertNotEqual(Union[X, int], Union[X])
        self.assertNotEqual(Union[X, int], Union[int])
        self.assertEqual(Union[X, int].__args__, (X, int))
        self.assertEqual(Union[X, int].__parameters__, (X,))
        self.assertIs(Union[X, int].__origin__, Union)

    def test_union_constrained(self):
        A = TypeVar('A', str, bytes)
        self.assertNotEqual(Union[A, str], Union[A])

    def test_repr(self):
        # Plain TypeVars repr as ~name; variance flips the prefix.
        self.assertEqual(repr(T), '~T')
        self.assertEqual(repr(KT), '~KT')
        self.assertEqual(repr(VT), '~VT')
        self.assertEqual(repr(AnyStr), '~AnyStr')
        T_co = TypeVar('T_co', covariant=True)
        self.assertEqual(repr(T_co), '+T_co')
        T_contra = TypeVar('T_contra', contravariant=True)
        self.assertEqual(repr(T_contra), '-T_contra')

    def test_no_redefinition(self):
        # Every TypeVar call creates a distinct object, even with
        # identical arguments.
        self.assertNotEqual(TypeVar('T'), TypeVar('T'))
        self.assertNotEqual(TypeVar('T', int, str), TypeVar('T', int, str))

    def test_cannot_subclass_vars(self):
        with self.assertRaises(TypeError):
            class V(TypeVar('T')):
                pass

    def test_cannot_subclass_var_itself(self):
        with self.assertRaises(TypeError):
            class V(TypeVar):
                pass

    def test_cannot_instantiate_vars(self):
        with self.assertRaises(TypeError):
            TypeVar('A')()

    def test_bound_errors(self):
        # bound must be a type, and is incompatible with constraints.
        with self.assertRaises(TypeError):
            TypeVar('X', bound=42)
        with self.assertRaises(TypeError):
            TypeVar('X', str, float, bound=Employee)

    def test_no_bivariant(self):
        # A TypeVar cannot be both covariant and contravariant.
        with self.assertRaises(ValueError):
            TypeVar('T', covariant=True, contravariant=True)
class UnionTests(BaseTestCase):
    """Behavior of typing.Union and typing.Optional."""

    def test_basics(self):
        u = Union[int, float]
        self.assertNotEqual(u, Union)

    def test_subclass_error(self):
        # Unions reject issubclass() in either position.
        with self.assertRaises(TypeError):
            issubclass(int, Union)
        with self.assertRaises(TypeError):
            issubclass(Union, int)
        with self.assertRaises(TypeError):
            issubclass(int, Union[int, str])
        with self.assertRaises(TypeError):
            issubclass(Union[int, str], int)

    def test_union_any(self):
        # Any collapses the union only when it is the sole member.
        u = Union[Any]
        self.assertEqual(u, Any)
        u1 = Union[int, Any]
        u2 = Union[Any, int]
        u3 = Union[Any, object]
        self.assertEqual(u1, u2)
        self.assertNotEqual(u1, Any)
        self.assertNotEqual(u2, Any)
        self.assertNotEqual(u3, Any)

    def test_union_object(self):
        u = Union[object]
        self.assertEqual(u, object)
        u1 = Union[int, object]
        u2 = Union[object, int]
        self.assertEqual(u1, u2)
        self.assertNotEqual(u1, object)
        self.assertNotEqual(u2, object)

    def test_unordered(self):
        # Member order does not affect union equality.
        u1 = Union[int, float]
        u2 = Union[float, int]
        self.assertEqual(u1, u2)

    def test_single_class_disappears(self):
        # A one-member union collapses to that member.
        t = Union[Employee]
        self.assertIs(t, Employee)

    def test_base_class_kept(self):
        # A subclass member does not absorb its base class.
        u = Union[Employee, Manager]
        self.assertNotEqual(u, Employee)
        self.assertIn(Employee, u.__args__)
        self.assertIn(Manager, u.__args__)

    def test_union_union(self):
        # Nested unions are flattened.
        u = Union[int, float]
        v = Union[u, Employee]
        self.assertEqual(v, Union[int, float, Employee])

    def test_repr(self):
        self.assertEqual(repr(Union), 'typing.Union')
        u = Union[Employee, int]
        self.assertEqual(repr(u), 'typing.Union[%s.Employee, int]' % __name__)
        u = Union[int, Employee]
        self.assertEqual(repr(u), 'typing.Union[int, %s.Employee]' % __name__)
        T = TypeVar('T')
        u = Union[T, int][int]
        self.assertEqual(repr(u), repr(int))
        u = Union[List[int], int]
        self.assertEqual(repr(u), 'typing.Union[typing.List[int], int]')

    def test_cannot_subclass(self):
        with self.assertRaises(TypeError):
            class C(Union):
                pass
        with self.assertRaises(TypeError):
            class C(type(Union)):
                pass
        with self.assertRaises(TypeError):
            class C(Union[int, str]):
                pass

    def test_cannot_instantiate(self):
        with self.assertRaises(TypeError):
            Union()
        with self.assertRaises(TypeError):
            type(Union)()
        u = Union[int, float]
        with self.assertRaises(TypeError):
            u()
        with self.assertRaises(TypeError):
            type(u)()

    def test_union_generalization(self):
        self.assertFalse(Union[str, typing.Iterable[int]] == str)
        self.assertFalse(Union[str, typing.Iterable[int]] == typing.Iterable[int])
        self.assertIn(str, Union[str, typing.Iterable[int]].__args__)
        self.assertIn(typing.Iterable[int], Union[str, typing.Iterable[int]].__args__)

    def test_union_compare_other(self):
        # Unions compare unequal to unrelated typing constructs.
        self.assertNotEqual(Union, object)
        self.assertNotEqual(Union, Any)
        self.assertNotEqual(ClassVar, Union)
        self.assertNotEqual(Optional, Union)
        self.assertNotEqual([None], Optional)
        self.assertNotEqual(Optional, typing.Mapping)
        self.assertNotEqual(Optional[typing.MutableMapping], Union)

    def test_optional(self):
        # Optional[X] is sugar for Union[X, None].
        o = Optional[int]
        u = Union[int, None]
        self.assertEqual(o, u)

    def test_empty(self):
        with self.assertRaises(TypeError):
            Union[()]

    def test_union_instance_type_error(self):
        with self.assertRaises(TypeError):
            isinstance(42, Union[int, str])

    def test_no_eval_union(self):
        # The annotation is returned as-is, not re-evaluated.
        u = Union[int, str]
        def f(x: u): ...
        self.assertIs(get_type_hints(f)['x'], u)

    def test_function_repr_union(self):
        def fun() -> int: ...
        self.assertEqual(repr(Union[fun, int]), 'typing.Union[fun, int]')

    def test_union_str_pattern(self):
        # Shouldn't crash; see http://bugs.python.org/issue25390
        A = Union[str, Pattern]
        A

    def test_etree(self):
        # See https://github.com/python/typing/issues/229
        # (Only relevant for Python 2.)
        try:
            from xml.etree.cElementTree import Element
        except ImportError:
            raise SkipTest("cElementTree not found")
        Union[Element, str]  # Shouldn't crash
        def Elem(*args):
            return Element(*args)
        Union[Elem, str]  # Nor should this
class TupleTests(BaseTestCase):
    """Behavior of typing.Tuple."""

    def test_basics(self):
        # Parameterized tuples reject issubclass(); the bare form works.
        with self.assertRaises(TypeError):
            issubclass(Tuple, Tuple[int, str])
        with self.assertRaises(TypeError):
            issubclass(tuple, Tuple[int, str])
        class TP(tuple): ...
        self.assertTrue(issubclass(tuple, Tuple))
        self.assertTrue(issubclass(TP, Tuple))

    def test_equality(self):
        self.assertEqual(Tuple[int], Tuple[int])
        self.assertEqual(Tuple[int, ...], Tuple[int, ...])
        self.assertNotEqual(Tuple[int], Tuple[int, int])
        self.assertNotEqual(Tuple[int], Tuple[int, ...])

    def test_tuple_subclass(self):
        class MyTuple(tuple):
            pass
        self.assertTrue(issubclass(MyTuple, Tuple))

    def test_tuple_instance_type_error(self):
        # isinstance() works only against the unparameterized form.
        with self.assertRaises(TypeError):
            isinstance((0, 0), Tuple[int, int])
        self.assertIsInstance((0, 0), Tuple)

    def test_repr(self):
        self.assertEqual(repr(Tuple), 'typing.Tuple')
        self.assertEqual(repr(Tuple[()]), 'typing.Tuple[()]')
        self.assertEqual(repr(Tuple[int, float]), 'typing.Tuple[int, float]')
        self.assertEqual(repr(Tuple[int, ...]), 'typing.Tuple[int, ...]')

    def test_errors(self):
        with self.assertRaises(TypeError):
            issubclass(42, Tuple)
        with self.assertRaises(TypeError):
            issubclass(42, Tuple[int])
class CallableTests(BaseTestCase):
    """Behavior of typing.Callable."""

    def test_self_subclass(self):
        # Parameterized Callable rejects issubclass(); the bare form works.
        with self.assertRaises(TypeError):
            self.assertTrue(issubclass(type(lambda x: x), Callable[[int], int]))
        self.assertTrue(issubclass(type(lambda x: x), Callable))

    def test_eq_hash(self):
        # Equality and hashing depend on both argument and return types.
        self.assertEqual(Callable[[int], int], Callable[[int], int])
        self.assertEqual(len({Callable[[int], int], Callable[[int], int]}), 1)
        self.assertNotEqual(Callable[[int], int], Callable[[int], str])
        self.assertNotEqual(Callable[[int], int], Callable[[str], int])
        self.assertNotEqual(Callable[[int], int], Callable[[int, int], int])
        self.assertNotEqual(Callable[[int], int], Callable[[], int])
        self.assertNotEqual(Callable[[int], int], Callable)

    def test_cannot_instantiate(self):
        with self.assertRaises(TypeError):
            Callable()
        with self.assertRaises(TypeError):
            type(Callable)()
        c = Callable[[int], str]
        with self.assertRaises(TypeError):
            c()
        with self.assertRaises(TypeError):
            type(c)()

    def test_callable_wrong_forms(self):
        # Malformed subscriptions are rejected.
        with self.assertRaises(TypeError):
            Callable[[...], int]
        with self.assertRaises(TypeError):
            Callable[(), int]
        with self.assertRaises(TypeError):
            Callable[[()], int]
        with self.assertRaises(TypeError):
            Callable[[int, 1], 2]
        with self.assertRaises(TypeError):
            Callable[int]

    def test_callable_instance_works(self):
        def f():
            pass
        self.assertIsInstance(f, Callable)
        self.assertNotIsInstance(None, Callable)

    def test_callable_instance_type_error(self):
        # isinstance() is rejected for parameterized Callable.
        def f():
            pass
        with self.assertRaises(TypeError):
            self.assertIsInstance(f, Callable[[], None])
        with self.assertRaises(TypeError):
            self.assertIsInstance(f, Callable[[], Any])
        with self.assertRaises(TypeError):
            self.assertNotIsInstance(None, Callable[[], None])
        with self.assertRaises(TypeError):
            self.assertNotIsInstance(None, Callable[[], Any])

    def test_repr(self):
        ct0 = Callable[[], bool]
        self.assertEqual(repr(ct0), 'typing.Callable[[], bool]')
        ct2 = Callable[[str, float], int]
        self.assertEqual(repr(ct2), 'typing.Callable[[str, float], int]')
        ctv = Callable[..., str]
        self.assertEqual(repr(ctv), 'typing.Callable[..., str]')

    def test_callable_with_ellipsis(self):
        def foo(a: Callable[..., T]):
            pass
        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Callable[..., T]})

    def test_ellipsis_in_generic(self):
        # Shouldn't crash; see https://github.com/python/typing/issues/259
        typing.List[Callable[..., str]]
# TypeVars shared by the mapping fixtures: XK is constrained to
# str/bytes keys, XV is a free value type.
XK = TypeVar('XK', str, bytes)
XV = TypeVar('XV')

class SimpleMapping(Generic[XK, XV]):
    """Minimal generic mapping interface used as a test fixture."""

    def __getitem__(self, key: XK) -> XV:
        ...

    def __setitem__(self, key: XK, value: XV):
        ...

    def get(self, key: XK, default: XV = None) -> XV:
        ...
class MySimpleMapping(SimpleMapping[XK, XV]):
    """Concrete SimpleMapping backed by a plain dict."""

    def __init__(self):
        self.store = {}

    def __getitem__(self, key: str):
        return self.store[key]

    def __setitem__(self, key: str, value):
        self.store[key] = value

    def get(self, key: str, default=None):
        # dict.get gives the same missing-key fallback as the
        # original try/except KeyError.
        return self.store.get(key, default)
class ProtocolTests(BaseTestCase):
    """Behavior of the typing Supports* protocols."""

    def test_supports_int(self):
        self.assertIsSubclass(int, typing.SupportsInt)
        self.assertNotIsSubclass(str, typing.SupportsInt)

    def test_supports_float(self):
        self.assertIsSubclass(float, typing.SupportsFloat)
        self.assertNotIsSubclass(str, typing.SupportsFloat)

    def test_supports_complex(self):
        # Note: complex itself doesn't have __complex__.
        class C:
            def __complex__(self):
                return 0j
        self.assertIsSubclass(C, typing.SupportsComplex)
        self.assertNotIsSubclass(str, typing.SupportsComplex)

    def test_supports_bytes(self):
        # Note: bytes itself doesn't have __bytes__.
        class B:
            def __bytes__(self):
                return b''
        self.assertIsSubclass(B, typing.SupportsBytes)
        self.assertNotIsSubclass(str, typing.SupportsBytes)

    def test_supports_abs(self):
        self.assertIsSubclass(float, typing.SupportsAbs)
        self.assertIsSubclass(int, typing.SupportsAbs)
        self.assertNotIsSubclass(str, typing.SupportsAbs)

    def test_supports_round(self):
        issubclass(float, typing.SupportsRound)
        self.assertIsSubclass(float, typing.SupportsRound)
        self.assertIsSubclass(int, typing.SupportsRound)
        self.assertNotIsSubclass(str, typing.SupportsRound)

    def test_reversible(self):
        self.assertIsSubclass(list, typing.Reversible)
        self.assertNotIsSubclass(int, typing.Reversible)

    def test_protocol_instance_type_error(self):
        # Protocols reject isinstance(); explicit inheritance works.
        with self.assertRaises(TypeError):
            isinstance(0, typing.SupportsAbs)
        class C1(typing.SupportsInt):
            def __int__(self) -> int:
                return 42
        class C2(C1):
            pass
        c = C2()
        self.assertIsInstance(c, C1)
class GenericTests(BaseTestCase):
def test_basics(self):
    # Fully-parameterized aliases have no free parameters and cannot
    # be subscripted again; partially-parameterized ones can.
    X = SimpleMapping[str, Any]
    self.assertEqual(X.__parameters__, ())
    with self.assertRaises(TypeError):
        X[str]
    with self.assertRaises(TypeError):
        X[str, str]
    Y = SimpleMapping[XK, str]
    self.assertEqual(Y.__parameters__, (XK,))
    Y[str]
    with self.assertRaises(TypeError):
        Y[str, str]
    SM1 = SimpleMapping[str, int]
    with self.assertRaises(TypeError):
        issubclass(SM1, SimpleMapping)
    self.assertIsInstance(SM1(), SimpleMapping)

def test_generic_errors(self):
    # Invalid uses of Generic: instantiation, double subscription,
    # repeated bases, runtime checks on parameterized types.
    T = TypeVar('T')
    S = TypeVar('S')
    with self.assertRaises(TypeError):
        Generic[T]()
    with self.assertRaises(TypeError):
        Generic[T][T]
    with self.assertRaises(TypeError):
        Generic[T][S]
    with self.assertRaises(TypeError):
        class C(Generic[T], Generic[T]): ...
    with self.assertRaises(TypeError):
        isinstance([], List[int])
    with self.assertRaises(TypeError):
        issubclass(list, List[int])
    with self.assertRaises(TypeError):
        class NewGeneric(Generic): ...
    with self.assertRaises(TypeError):
        class MyGeneric(Generic[T], Generic[S]): ...
    with self.assertRaises(TypeError):
        class MyGeneric(List[T], Generic[S]): ...

def test_init(self):
    # Duplicate type variables in a Generic subscription are rejected.
    T = TypeVar('T')
    S = TypeVar('S')
    with self.assertRaises(TypeError):
        Generic[T, T]
    with self.assertRaises(TypeError):
        Generic[T, S, T]
def test_init_subclass(self):
    # __init_subclass__ hooks fire for both plain and parameterized
    # generic bases, and class attributes behave normally.
    class X(typing.Generic[T]):
        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__(**kwargs)
            cls.attr = 42
    class Y(X):
        pass
    self.assertEqual(Y.attr, 42)
    with self.assertRaises(AttributeError):
        X.attr
    X.attr = 1
    Y.attr = 2
    class Z(Y):
        pass
    class W(X[int]):
        pass
    self.assertEqual(Y.attr, 2)
    self.assertEqual(Z.attr, 42)
    self.assertEqual(W.attr, 42)

def test_repr(self):
    self.assertEqual(repr(SimpleMapping),
                     f"<class '{__name__}.SimpleMapping'>")
    self.assertEqual(repr(MySimpleMapping),
                     f"<class '{__name__}.MySimpleMapping'>")

def test_chain_repr(self):
    # Chained subscriptions substitute parameters left to right.
    T = TypeVar('T')
    S = TypeVar('S')
    class C(Generic[T]):
        pass
    X = C[Tuple[S, T]]
    self.assertEqual(X, C[Tuple[S, T]])
    self.assertNotEqual(X, C[Tuple[T, S]])
    Y = X[T, int]
    self.assertEqual(Y, X[T, int])
    self.assertNotEqual(Y, X[S, int])
    self.assertNotEqual(Y, X[T, str])
    Z = Y[str]
    self.assertEqual(Z, Y[str])
    self.assertNotEqual(Z, Y[int])
    self.assertNotEqual(Z, Y[T])
    self.assertTrue(str(Z).endswith(
        '.C[typing.Tuple[str, int]]'))

def test_new_repr(self):
    T = TypeVar('T')
    U = TypeVar('U', covariant=True)
    S = TypeVar('S')
    self.assertEqual(repr(List), 'typing.List')
    self.assertEqual(repr(List[T]), 'typing.List[~T]')
    self.assertEqual(repr(List[U]), 'typing.List[+U]')
    self.assertEqual(repr(List[S][T][int]), 'typing.List[int]')
    self.assertEqual(repr(List[int]), 'typing.List[int]')
def test_new_repr_complex(self):
    # repr after nested substitution reflects the substituted args.
    T = TypeVar('T')
    TS = TypeVar('TS')
    self.assertEqual(repr(typing.Mapping[T, TS][TS, T]), 'typing.Mapping[~TS, ~T]')
    self.assertEqual(repr(List[Tuple[T, TS]][int, T]),
                     'typing.List[typing.Tuple[int, ~T]]')
    self.assertEqual(
        repr(List[Tuple[T, T]][List[int]]),
        'typing.List[typing.Tuple[typing.List[int], typing.List[int]]]'
    )

def test_new_repr_bare(self):
    T = TypeVar('T')
    self.assertEqual(repr(Generic[T]), 'typing.Generic[~T]')
    self.assertEqual(repr(typing._Protocol[T]), 'typing._Protocol[~T]')
    class C(typing.Dict[Any, Any]): ...
    # this line should just work
    repr(C.__mro__)

def test_dict(self):
    # Instances of generic classes have ordinary __dict__ behavior.
    T = TypeVar('T')
    class B(Generic[T]):
        pass
    b = B()
    b.foo = 42
    self.assertEqual(b.__dict__, {'foo': 42})
    class C(B[int]):
        pass
    c = C()
    c.bar = 'abc'
    self.assertEqual(c.__dict__, {'bar': 'abc'})

def test_subscripted_generics_as_proxies(self):
    # Attribute access and assignment on a subscripted generic proxy
    # through to the origin class.
    T = TypeVar('T')
    class C(Generic[T]):
        x = 'def'
    self.assertEqual(C[int].x, 'def')
    self.assertEqual(C[C[int]].x, 'def')
    C[C[int]].x = 'changed'
    self.assertEqual(C.x, 'changed')
    self.assertEqual(C[str].x, 'changed')
    C[List[str]].z = 'new'
    self.assertEqual(C.z, 'new')
    self.assertEqual(C[Tuple[int]].z, 'new')
    self.assertEqual(C().x, 'changed')
    self.assertEqual(C[Tuple[str]]().z, 'new')
    class D(C[T]):
        pass
    self.assertEqual(D[int].x, 'changed')
    self.assertEqual(D.z, 'new')
    D.z = 'from derived z'
    D[int].x = 'from derived x'
    self.assertEqual(C.x, 'changed')
    self.assertEqual(C[int].z, 'new')
    self.assertEqual(D.x, 'from derived x')
    self.assertEqual(D[str].z, 'from derived z')

def test_abc_registry_kept(self):
    # Subscripting must not discard ABC virtual-subclass registrations.
    T = TypeVar('T')
    class C(collections.abc.Mapping, Generic[T]): ...
    C.register(int)
    self.assertIsInstance(1, C)
    C[int]
    self.assertIsInstance(1, C)
    C._abc_registry_clear()
    C._abc_caches_clear()  # To keep refleak hunting mode clean
def test_false_subclasses(self):
    class MyMapping(MutableMapping[str, str]): pass
    self.assertNotIsInstance({}, MyMapping)
    self.assertNotIsSubclass(dict, MyMapping)

def test_abc_bases(self):
    # A generic-ABC subclass gains the concrete mixin methods.
    class MM(MutableMapping[str, str]):
        def __getitem__(self, k):
            return None
        def __setitem__(self, k, v):
            pass
        def __delitem__(self, k):
            pass
        def __iter__(self):
            return iter(())
        def __len__(self):
            return 0
    # this should just work
    MM().update()
    self.assertIsInstance(MM(), collections.abc.MutableMapping)
    self.assertIsInstance(MM(), MutableMapping)
    self.assertNotIsInstance(MM(), List)
    self.assertNotIsInstance({}, MM)

def test_multiple_bases(self):
    # Mixing the typing alias with the ABC in either order is allowed.
    class MM1(MutableMapping[str, str], collections.abc.MutableMapping):
        pass
    class MM2(collections.abc.MutableMapping, MutableMapping[str, str]):
        pass
    self.assertEqual(MM2.__bases__, (collections.abc.MutableMapping, Generic))

def test_orig_bases(self):
    # __orig_bases__ preserves the parameterized base.
    T = TypeVar('T')
    class C(typing.Dict[str, T]): ...
    self.assertEqual(C.__orig_bases__, (typing.Dict[str, T],))

def test_naive_runtime_checks(self):
    def naive_dict_check(obj, tp):
        # Check if a dictionary conforms to Dict type
        if len(tp.__parameters__) > 0:
            raise NotImplementedError
        if tp.__args__:
            KT, VT = tp.__args__
            return all(
                isinstance(k, KT) and isinstance(v, VT)
                for k, v in obj.items()
            )
    self.assertTrue(naive_dict_check({'x': 1}, typing.Dict[str, int]))
    self.assertFalse(naive_dict_check({1: 'x'}, typing.Dict[str, int]))
    with self.assertRaises(NotImplementedError):
        naive_dict_check({1: 'x'}, typing.Dict[str, T])
    def naive_generic_check(obj, tp):
        # Check if an instance conforms to the generic class
        if not hasattr(obj, '__orig_class__'):
            raise NotImplementedError
        return obj.__orig_class__ == tp
    class Node(Generic[T]): ...
    self.assertTrue(naive_generic_check(Node[int](), Node[int]))
    self.assertFalse(naive_generic_check(Node[str](), Node[int]))
    self.assertFalse(naive_generic_check(Node[str](), List))
    with self.assertRaises(NotImplementedError):
        naive_generic_check([1, 2, 3], Node[int])
    def naive_list_base_check(obj, tp):
        # Check if list conforms to a List subclass
        return all(isinstance(x, tp.__orig_bases__[0].__args__[0])
                   for x in obj)
    class C(List[int]): ...
    self.assertTrue(naive_list_base_check([1, 2, 3], C))
    self.assertFalse(naive_list_base_check(['a', 'b'], C))
def test_multi_subscr_base(self):
    # Repeated subscription of a base leaves only the innermost
    # parameter free.
    T = TypeVar('T')
    U = TypeVar('U')
    V = TypeVar('V')
    class C(List[T][U][V]): ...
    class D(C, List[T][U][V]): ...
    self.assertEqual(C.__parameters__, (V,))
    self.assertEqual(D.__parameters__, (V,))
    self.assertEqual(C[int].__parameters__, ())
    self.assertEqual(D[int].__parameters__, ())
    self.assertEqual(C[int].__args__, (int,))
    self.assertEqual(D[int].__args__, (int,))
    self.assertEqual(C.__bases__, (list, Generic))
    self.assertEqual(D.__bases__, (C, list, Generic))
    self.assertEqual(C.__orig_bases__, (List[T][U][V],))
    self.assertEqual(D.__orig_bases__, (C, List[T][U][V]))

def test_subscript_meta(self):
    # Metaclasses are valid subscription arguments.
    T = TypeVar('T')
    class Meta(type): ...
    self.assertEqual(Type[Meta], Type[Meta])
    self.assertEqual(Union[T, int][Meta], Union[Meta, int])
    self.assertEqual(Callable[..., Meta].__args__, (Ellipsis, Meta))

def test_generic_hashes(self):
    # Same-named generics from different scopes/modules must not
    # compare or hash equal.
    class A(Generic[T]):
        ...
    class B(Generic[T]):
        class A(Generic[T]):
            ...
    self.assertEqual(A, A)
    self.assertEqual(mod_generics_cache.A[str], mod_generics_cache.A[str])
    self.assertEqual(B.A, B.A)
    self.assertEqual(mod_generics_cache.B.A[B.A[str]],
                     mod_generics_cache.B.A[B.A[str]])
    self.assertNotEqual(A, B.A)
    self.assertNotEqual(A, mod_generics_cache.A)
    self.assertNotEqual(A, mod_generics_cache.B.A)
    self.assertNotEqual(B.A, mod_generics_cache.A)
    self.assertNotEqual(B.A, mod_generics_cache.B.A)
    self.assertNotEqual(A[str], B.A[str])
    self.assertNotEqual(A[List[Any]], B.A[List[Any]])
    self.assertNotEqual(A[str], mod_generics_cache.A[str])
    self.assertNotEqual(A[str], mod_generics_cache.B.A[str])
    self.assertNotEqual(B.A[int], mod_generics_cache.A[int])
    self.assertNotEqual(B.A[List[Any]], mod_generics_cache.B.A[List[Any]])
    self.assertNotEqual(Tuple[A[str]], Tuple[B.A[str]])
    self.assertNotEqual(Tuple[A[List[Any]]], Tuple[B.A[List[Any]]])
    self.assertNotEqual(Union[str, A[str]], Union[str, mod_generics_cache.A[str]])
    self.assertNotEqual(Union[A[str], A[str]],
                        Union[A[str], mod_generics_cache.A[str]])
    self.assertNotEqual(typing.FrozenSet[A[str]],
                        typing.FrozenSet[mod_generics_cache.B.A[str]])
    if sys.version_info[:2] > (3, 2):
        # reprs include the qualified (possibly <locals>) name.
        self.assertTrue(repr(Tuple[A[str]]).endswith('<locals>.A[str]]'))
        self.assertTrue(repr(Tuple[B.A[str]]).endswith('<locals>.B.A[str]]'))
        self.assertTrue(repr(Tuple[mod_generics_cache.A[str]])
                        .endswith('mod_generics_cache.A[str]]'))
        self.assertTrue(repr(Tuple[mod_generics_cache.B.A[str]])
                        .endswith('mod_generics_cache.B.A[str]]'))
    def test_extended_generic_rules_eq(self):
        # Substituting type variables must work through nested generics,
        # and invalid substitutions must raise TypeError.
        T = TypeVar('T')
        U = TypeVar('U')
        self.assertEqual(Tuple[T, T][int], Tuple[int, int])
        self.assertEqual(typing.Iterable[Tuple[T, T]][T], typing.Iterable[Tuple[T, T]])
        with self.assertRaises(TypeError):
            Tuple[T, int][()]
        with self.assertRaises(TypeError):
            Tuple[T, U][T, ...]
        self.assertEqual(Union[T, int][int], int)
        self.assertEqual(Union[T, U][int, Union[int, str]], Union[int, str])
        class Base: ...
        class Derived(Base): ...
        self.assertEqual(Union[T, Base][Union[Base, Derived]], Union[Base, Derived])
        with self.assertRaises(TypeError):
            Union[T, int][1]
        self.assertEqual(Callable[[T], T][KT], Callable[[KT], KT])
        self.assertEqual(Callable[..., List[T]][int], Callable[..., List[int]])
        with self.assertRaises(TypeError):
            Callable[[T], U][..., int]
        with self.assertRaises(TypeError):
            Callable[[T], U][[], int]
    def test_extended_generic_rules_repr(self):
        # reprs of substituted aliases ('typing.' prefix stripped for brevity).
        T = TypeVar('T')
        self.assertEqual(repr(Union[Tuple, Callable]).replace('typing.', ''),
                         'Union[Tuple, Callable]')
        self.assertEqual(repr(Union[Tuple, Tuple[int]]).replace('typing.', ''),
                         'Union[Tuple, Tuple[int]]')
        self.assertEqual(repr(Callable[..., Optional[T]][int]).replace('typing.', ''),
                         'Callable[..., Union[int, NoneType]]')
        self.assertEqual(repr(Callable[[], List[T]][int]).replace('typing.', ''),
                         'Callable[[], List[int]]')
    def test_generic_forward_ref(self):
        # Forward references nested inside generics resolve via get_type_hints;
        # aliases that still contain type variables are returned as-is (assertIs).
        def foobar(x: List[List['CC']]): ...
        class CC: ...
        self.assertEqual(
            get_type_hints(foobar, globals(), locals()),
            {'x': List[List[CC]]}
        )
        T = TypeVar('T')
        AT = Tuple[T, ...]
        def barfoo(x: AT): ...
        self.assertIs(get_type_hints(barfoo, globals(), locals())['x'], AT)
        CT = Callable[..., List[T]]
        def barfoo2(x: CT): ...
        self.assertIs(get_type_hints(barfoo2, globals(), locals())['x'], CT)
    def test_extended_generic_rules_subclassing(self):
        # Subclassing parameterized Tuple/Callable keeps __parameters__ and
        # runtime behavior: instances are real tuples / callables.
        class T1(Tuple[T, KT]): ...
        class T2(Tuple[T, ...]): ...
        class C1(Callable[[T], T]): ...
        class C2(Callable[..., int]):
            def __call__(self):
                return None
        self.assertEqual(T1.__parameters__, (T, KT))
        self.assertEqual(T1[int, str].__args__, (int, str))
        self.assertEqual(T1[int, T].__origin__, T1)
        self.assertEqual(T2.__parameters__, (T,))
        with self.assertRaises(TypeError):
            T1[int]
        with self.assertRaises(TypeError):
            T2[int, str]
        self.assertEqual(repr(C1[int]).split('.')[-1], 'C1[int]')
        self.assertEqual(C2.__parameters__, ())
        self.assertIsInstance(C2(), collections.abc.Callable)
        self.assertIsSubclass(C2, collections.abc.Callable)
        self.assertIsSubclass(C1, collections.abc.Callable)
        self.assertIsInstance(T1(), tuple)
        self.assertIsSubclass(T2, tuple)
        # issubclass() with a subscripted generic as first argument is rejected.
        with self.assertRaises(TypeError):
            issubclass(Tuple[int, ...], typing.Sequence)
        with self.assertRaises(TypeError):
            issubclass(Tuple[int, ...], typing.Iterable)
    def test_fail_with_bare_union(self):
        # Bare special forms (Union, Optional, ClassVar) are not valid
        # type arguments; ClassVar is also rejected in nested positions.
        with self.assertRaises(TypeError):
            List[Union]
        with self.assertRaises(TypeError):
            Tuple[Optional]
        with self.assertRaises(TypeError):
            ClassVar[ClassVar]
        with self.assertRaises(TypeError):
            List[ClassVar[int]]
    def test_fail_with_bare_generic(self):
        # Bare Generic, subscripted Generic, and _Protocol are likewise
        # rejected as type arguments.
        T = TypeVar('T')
        with self.assertRaises(TypeError):
            List[Generic]
        with self.assertRaises(TypeError):
            Tuple[Generic[T]]
        with self.assertRaises(TypeError):
            List[typing._Protocol]
    def test_type_erasure_special(self):
        T = TypeVar('T')
        # this is the only test that checks type caching
        self.clear_caches()
        # Instantiating a subscripted user generic erases the parameters
        # from __class__ but records them on __orig_class__.
        class MyTup(Tuple[T, T]): ...
        self.assertIs(MyTup[int]().__class__, MyTup)
        self.assertIs(MyTup[int]().__orig_class__, MyTup[int])
        class MyCall(Callable[..., T]):
            def __call__(self): return None
        self.assertIs(MyCall[T]().__class__, MyCall)
        self.assertIs(MyCall[T]().__orig_class__, MyCall[T])
        class MyDict(typing.Dict[T, T]): ...
        self.assertIs(MyDict[int]().__class__, MyDict)
        self.assertIs(MyDict[int]().__orig_class__, MyDict[int])
        class MyDef(typing.DefaultDict[str, T]): ...
        self.assertIs(MyDef[int]().__class__, MyDef)
        self.assertIs(MyDef[int]().__orig_class__, MyDef[int])
        # ChainMap was added in 3.3
        if sys.version_info >= (3, 3):
            class MyChain(typing.ChainMap[str, T]): ...
            self.assertIs(MyChain[int]().__class__, MyChain)
            self.assertIs(MyChain[int]().__orig_class__, MyChain[int])
    def test_all_repr_eq_any(self):
        # Every public typing name must have a non-empty repr, compare equal
        # to itself, and (for one-parameter generics) accept an Any argument.
        objs = (getattr(typing, el) for el in typing.__all__)
        for obj in objs:
            self.assertNotEqual(repr(obj), '')
            self.assertEqual(obj, obj)
            if getattr(obj, '__parameters__', None) and len(obj.__parameters__) == 1:
                self.assertEqual(obj[Any].__args__, (Any,))
            if isinstance(obj, type):
                for base in obj.__mro__:
                    self.assertNotEqual(repr(base), '')
                    self.assertEqual(base, base)
    def test_pickle(self):
        # Instances of user generics, and the typing special forms and
        # aliases themselves, must round-trip through every pickle protocol.
        global C # pickle wants to reference the class by name
        T = TypeVar('T')
        class B(Generic[T]):
            pass
        class C(B[int]):
            pass
        c = C()
        c.foo = 42
        c.bar = 'abc'
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            z = pickle.dumps(c, proto)
            x = pickle.loads(z)
            self.assertEqual(x.foo, 42)
            self.assertEqual(x.bar, 'abc')
            self.assertEqual(x.__dict__, {'foo': 42, 'bar': 'abc'})
        samples = [Any, Union, Tuple, Callable, ClassVar,
                   Union[int, str], ClassVar[List], Tuple[int, ...], Callable[[str], bytes],
                   typing.DefaultDict, typing.FrozenSet[int]]
        for s in samples:
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                z = pickle.dumps(s, proto)
                x = pickle.loads(z)
                self.assertEqual(s, x)
        more_samples = [List, typing.Iterable, typing.Type, List[int],
                        typing.Type[typing.Mapping], typing.AbstractSet[Tuple[int, str]]]
        for s in more_samples:
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                z = pickle.dumps(s, proto)
                x = pickle.loads(z)
                self.assertEqual(s, x)
    def test_copy_and_deepcopy(self):
        # copy()/deepcopy() of typing objects must return an equal object.
        T = TypeVar('T')
        class Node(Generic[T]): ...
        things = [Union[T, int], Tuple[T, int], Callable[..., T], Callable[[int], int],
                  Tuple[Any, Any], Node[T], Node[int], Node[Any], typing.Iterable[T],
                  typing.Iterable[Any], typing.Iterable[int], typing.Dict[int, str],
                  typing.Dict[T, Any], ClassVar[int], ClassVar[List[T]], Tuple['T', 'T'],
                  Union['T', int], List['T'], typing.Mapping['T', int]]
        for t in things + [Any]:
            self.assertEqual(t, copy(t))
            self.assertEqual(t, deepcopy(t))
    def test_immutability_by_copy_and_pickle(self):
        # Special forms like Union, Any, etc., generic aliases to containers like List,
        # Mapping, etc., and type variables are considered immutable by copy and pickle.
        global TP, TPB, TPV # for pickle
        TP = TypeVar('TP')
        TPB = TypeVar('TPB', bound=int)
        TPV = TypeVar('TPV', bytes, str)
        for X in [TP, TPB, TPV, List, typing.Mapping, ClassVar, typing.Iterable,
                  Union, Any, Tuple, Callable]:
            # "Immutable" here means copy/deepcopy/pickle return the same object.
            self.assertIs(copy(X), X)
            self.assertIs(deepcopy(X), X)
            self.assertIs(pickle.loads(pickle.dumps(X)), X)
        # Check that local type variables are copyable.
        TL = TypeVar('TL')
        TLB = TypeVar('TLB', bound=int)
        TLV = TypeVar('TLV', bytes, str)
        for X in [TL, TLB, TLV]:
            self.assertIs(copy(X), X)
            self.assertIs(deepcopy(X), X)
    def test_copy_generic_instances(self):
        # Copies of generic-class instances preserve attributes (and
        # __orig_class__ for parameterized ones) while being new objects.
        T = TypeVar('T')
        class C(Generic[T]):
            def __init__(self, attr: T) -> None:
                self.attr = attr
        c = C(42)
        self.assertEqual(copy(c).attr, 42)
        self.assertEqual(deepcopy(c).attr, 42)
        self.assertIsNot(copy(c), c)
        self.assertIsNot(deepcopy(c), c)
        c.attr = 1
        self.assertEqual(copy(c).attr, 1)
        self.assertEqual(deepcopy(c).attr, 1)
        ci = C[int](42)
        self.assertEqual(copy(ci).attr, 42)
        self.assertEqual(deepcopy(ci).attr, 42)
        self.assertIsNot(copy(ci), ci)
        self.assertIsNot(deepcopy(ci), ci)
        ci.attr = 1
        self.assertEqual(copy(ci).attr, 1)
        self.assertEqual(deepcopy(ci).attr, 1)
        self.assertEqual(ci.__orig_class__, C[int])
    def test_weakref_all(self):
        # typing objects support weak references.
        T = TypeVar('T')
        things = [Any, Union[T, int], Callable[..., T], Tuple[Any, Any],
                  Optional[List[int]], typing.Mapping[int, str],
                  typing.re.Match[bytes], typing.Iterable['whatever']]
        for t in things:
            self.assertEqual(weakref.ref(t)(), t)
    def test_parameterized_slots(self):
        # __slots__ declared on a generic class constrains parameterized
        # instances exactly like plain ones.
        T = TypeVar('T')
        class C(Generic[T]):
            __slots__ = ('potato',)
        c = C()
        c_int = C[int]()
        c.potato = 0
        c_int.potato = 0
        with self.assertRaises(AttributeError):
            c.tomato = 0
        with self.assertRaises(AttributeError):
            c_int.tomato = 0
        def foo(x: C['C']): ...
        self.assertEqual(get_type_hints(foo, globals(), locals())['x'], C[C])
        self.assertEqual(copy(C[int]), deepcopy(C[int]))
    def test_parameterized_slots_dict(self):
        # __slots__ given as a dict behaves the same as a tuple of names.
        T = TypeVar('T')
        class D(Generic[T]):
            __slots__ = {'banana': 42}
        d = D()
        d_int = D[int]()
        d.banana = 'yes'
        d_int.banana = 'yes'
        with self.assertRaises(AttributeError):
            d.foobar = 'no'
        with self.assertRaises(AttributeError):
            d_int.foobar = 'no'
    def test_errors(self):
        # A fully subscripted alias is not a TypeVar, so it cannot be used
        # to parameterize Generic.
        with self.assertRaises(TypeError):
            B = SimpleMapping[XK, Any]
            class C(Generic[B]):
                pass
    def test_repr_2(self):
        # __module__ and __qualname__ survive subscripting and subclassing.
        class C(Generic[T]):
            pass
        self.assertEqual(C.__module__, __name__)
        self.assertEqual(C.__qualname__,
                         'GenericTests.test_repr_2.<locals>.C')
        X = C[int]
        self.assertEqual(X.__module__, __name__)
        self.assertEqual(repr(X).split('.')[-1], 'C[int]')
        class Y(C[int]):
            pass
        self.assertEqual(Y.__module__, __name__)
        self.assertEqual(Y.__qualname__,
                         'GenericTests.test_repr_2.<locals>.Y')
    def test_eq_1(self):
        # Generic aliases compare equal only with identical parameters.
        self.assertEqual(Generic, Generic)
        self.assertEqual(Generic[T], Generic[T])
        self.assertNotEqual(Generic[KT], Generic[VT])
    def test_eq_2(self):
        # Distinct generic classes (and their subscriptions) are unequal.
        class A(Generic[T]):
            pass
        class B(Generic[T]):
            pass
        self.assertEqual(A, A)
        self.assertNotEqual(A, B)
        self.assertEqual(A[T], A[T])
        self.assertNotEqual(A[T], B[T])
    def test_multiple_inheritance(self):
        # With an explicit Generic[...] base, __parameters__ follow its order.
        class A(Generic[T, VT]):
            pass
        class B(Generic[KT, T]):
            pass
        class C(A[T, VT], Generic[VT, T, KT], B[KT, T]):
            pass
        self.assertEqual(C.__parameters__, (VT, T, KT))
    def test_multiple_inheritance_special(self):
        # Mixing a parameterized builtin base with a plain generic base
        # must yield a consistent MRO.
        S = TypeVar('S')
        class B(Generic[S]): ...
        class C(List[int], B): ...
        self.assertEqual(C.__mro__, (C, list, B, Generic, object))
    def test_init_subclass_super_called(self):
        # __init_subclass__ hooks must fire for generic subclasses too,
        # both for plain and subscripted bases.
        class FinalException(Exception):
            pass
        class Final:
            def __init_subclass__(cls, **kwargs) -> None:
                for base in cls.__bases__:
                    if base is not Final and issubclass(base, Final):
                        raise FinalException(base)
                super().__init_subclass__(**kwargs)
        class Test(Generic[T], Final):
            pass
        with self.assertRaises(FinalException):
            class Subclass(Test):
                pass
        with self.assertRaises(FinalException):
            class Subclass(Test[int]):
                pass
    def test_nested(self):
        # A generic class defined inside a function can be subscripted and
        # the resulting alias used as a base class.
        G = Generic
        class Visitor(G[T]):
            a = None
            def set(self, a: T):
                self.a = a
            def get(self):
                return self.a
            def visit(self) -> T:
                return self.a
        V = Visitor[typing.List[int]]
        class IntListVisitor(V):
            def append(self, x: int):
                self.a.append(x)
        a = IntListVisitor()
        a.set([])
        a.append(1)
        a.append(42)
        self.assertEqual(a.get(), [1, 42])
    def test_type_erasure(self):
        # Node(x), Node[T](x) and Node[Any](x) all construct plain Node
        # instances at runtime: parameters are erased.
        T = TypeVar('T')
        class Node(Generic[T]):
            def __init__(self, label: T,
                         left: 'Node[T]' = None,
                         right: 'Node[T]' = None):
                self.label = label # type: T
                self.left = left # type: Optional[Node[T]]
                self.right = right # type: Optional[Node[T]]
        def foo(x: T):
            a = Node(x)
            b = Node[T](x)
            c = Node[Any](x)
            self.assertIs(type(a), Node)
            self.assertIs(type(b), Node)
            self.assertIs(type(c), Node)
            self.assertEqual(a.label, x)
            self.assertEqual(b.label, x)
            self.assertEqual(c.label, x)
        foo(42)
    def test_implicit_any(self):
        # Subclassing a generic without parameters implies Any and makes
        # the subclass non-subscriptable.
        T = TypeVar('T')
        class C(Generic[T]):
            pass
        class D(C):
            pass
        self.assertEqual(D.__parameters__, ())
        with self.assertRaises(Exception):
            D[int]
        with self.assertRaises(Exception):
            D[Any]
        with self.assertRaises(Exception):
            D[T]
    def test_new_with_args(self):
        # Generic's __new__ must cooperate with a sibling base whose
        # __new__ takes arguments.
        class A(Generic[T]):
            pass
        class B:
            def __new__(cls, arg):
                # call object
                obj = super().__new__(cls)
                obj.arg = arg
                return obj
        # mro: C, A, Generic, B, object
        class C(A, B):
            pass
        c = C('foo')
        self.assertEqual(c.arg, 'foo')
    def test_new_with_args2(self):
        # Same idea, but chaining through __init__ with Generic in the MRO.
        class A:
            def __init__(self, arg):
                self.from_a = arg
                # call object
                super().__init__()
        # mro: C, Generic, A, object
        class C(Generic[T], A):
            def __init__(self, arg):
                self.from_c = arg
                # call Generic
                super().__init__(arg)
        c = C('foo')
        self.assertEqual(c.from_a, 'foo')
        self.assertEqual(c.from_c, 'foo')
    def test_new_no_args(self):
        # A generic class itself takes no constructor arguments, but
        # cooperative __new__/__init__ in subclasses must still work.
        class A(Generic[T]):
            pass
        with self.assertRaises(TypeError):
            A('foo')
        class B:
            def __new__(cls):
                # call object
                obj = super().__new__(cls)
                obj.from_b = 'b'
                return obj
        # mro: C, A, Generic, B, object
        class C(A, B):
            def __init__(self, arg):
                self.arg = arg
            def __new__(cls, arg):
                # call A
                obj = super().__new__(cls)
                obj.from_c = 'c'
                return obj
        c = C('foo')
        self.assertEqual(c.arg, 'foo')
        self.assertEqual(c.from_b, 'b')
        self.assertEqual(c.from_c, 'c')
class ClassVarTests(BaseTestCase):
    """Tests for the typing.ClassVar special form."""
    def test_basics(self):
        # ClassVar takes exactly one type argument and cannot be
        # re-subscripted.
        with self.assertRaises(TypeError):
            ClassVar[1]
        with self.assertRaises(TypeError):
            ClassVar[int, str]
        with self.assertRaises(TypeError):
            ClassVar[int][str]
    def test_repr(self):
        self.assertEqual(repr(ClassVar), 'typing.ClassVar')
        cv = ClassVar[int]
        self.assertEqual(repr(cv), 'typing.ClassVar[int]')
        cv = ClassVar[Employee]
        self.assertEqual(repr(cv), 'typing.ClassVar[%s.Employee]' % __name__)
    def test_cannot_subclass(self):
        # Neither ClassVar's type nor that of a subscripted ClassVar
        # may be used as a base class.
        with self.assertRaises(TypeError):
            class C(type(ClassVar)):
                pass
        with self.assertRaises(TypeError):
            class C(type(ClassVar[int])):
                pass
    def test_cannot_init(self):
        with self.assertRaises(TypeError):
            ClassVar()
        with self.assertRaises(TypeError):
            type(ClassVar)()
        with self.assertRaises(TypeError):
            type(ClassVar[Optional[int]])()
    def test_no_isinstance(self):
        # ClassVar rejects both isinstance() and issubclass().
        with self.assertRaises(TypeError):
            isinstance(1, ClassVar[int])
        with self.assertRaises(TypeError):
            issubclass(int, ClassVar)
class CastTests(BaseTestCase):
    """Tests for typing.cast(), a runtime no-op."""
    def test_basics(self):
        # cast() returns its second argument unchanged, whatever the target.
        for target in (int, float, Any, list, Union[str, float], AnyStr, None):
            self.assertEqual(cast(target, 42), 42)
        # No conversion happens: the value keeps its original runtime type.
        self.assertIs(type(cast(float, 42)), int)
    def test_errors(self):
        # Bogus calls are not expected to fail.
        for bogus_target in (42, 'hello'):
            cast(bogus_target, 42)
class ForwardRefTests(BaseTestCase):
    """Tests for string ('forward') references and their resolution."""
    def test_basics(self):
        # Forward references in method annotations resolve through
        # get_type_hints(); a None default turns the hint into Optional.
        class Node(Generic[T]):
            def __init__(self, label: T):
                self.label = label
                self.left = self.right = None
            def add_both(self,
                         left: 'Optional[Node[T]]',
                         right: 'Node[T]' = None,
                         stuff: int = None,
                         blah=None):
                self.left = left
                self.right = right
            def add_left(self, node: Optional['Node[T]']):
                self.add_both(node, None)
            def add_right(self, node: 'Node[T]' = None):
                self.add_both(None, node)
        t = Node[int]
        both_hints = get_type_hints(t.add_both, globals(), locals())
        self.assertEqual(both_hints['left'], Optional[Node[T]])
        self.assertEqual(both_hints['right'], Optional[Node[T]])
        self.assertEqual(both_hints['left'], both_hints['right'])
        self.assertEqual(both_hints['stuff'], Optional[int])
        # Unannotated parameters do not appear in the hints at all.
        self.assertNotIn('blah', both_hints)
        left_hints = get_type_hints(t.add_left, globals(), locals())
        self.assertEqual(left_hints['node'], Optional[Node[T]])
        right_hints = get_type_hints(t.add_right, globals(), locals())
        self.assertEqual(right_hints['node'], Optional[Node[T]])
    def test_forwardref_instance_type_error(self):
        # ForwardRef objects reject isinstance() ...
        fr = typing.ForwardRef('int')
        with self.assertRaises(TypeError):
            isinstance(42, fr)
    def test_forwardref_subclass_type_error(self):
        # ... and issubclass().
        fr = typing.ForwardRef('int')
        with self.assertRaises(TypeError):
            issubclass(int, fr)
    def test_forward_equality(self):
        fr = typing.ForwardRef('int')
        self.assertEqual(fr, typing.ForwardRef('int'))
        # An unresolved forward reference is not equal to its target type.
        self.assertNotEqual(List['int'], List[int])
    def test_forward_repr(self):
        self.assertEqual(repr(List['int']), "typing.List[ForwardRef('int')]")
    def test_union_forward(self):
        def foo(a: Union['T']):
            pass
        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Union[T]})
    def test_tuple_forward(self):
        def foo(a: Tuple['T']):
            pass
        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Tuple[T]})
    def test_callable_forward(self):
        def foo(a: Callable[['T'], 'T']):
            pass
        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Callable[[T], T]})
    def test_callable_with_ellipsis_forward(self):
        def foo(a: 'Callable[..., T]'):
            pass
        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Callable[..., T]})
    def test_syntax_error(self):
        # A malformed forward reference fails immediately on subscription...
        with self.assertRaises(SyntaxError):
            Generic['/T']
    def test_delayed_syntax_error(self):
        # ...but inside an annotation it only fails when hints are resolved.
        def foo(a: 'Node[T'):
            pass
        with self.assertRaises(SyntaxError):
            get_type_hints(foo)
    def test_type_error(self):
        # A forward reference that evaluates to a non-type raises TypeError.
        def foo(a: Tuple['42']):
            pass
        with self.assertRaises(TypeError):
            get_type_hints(foo)
    def test_name_error(self):
        # Referencing an unknown name raises NameError at resolution time.
        def foo(a: 'Noode[T]'):
            pass
        with self.assertRaises(NameError):
            get_type_hints(foo, locals())
    def test_no_type_check(self):
        # @no_type_check empties the hints of a function...
        @no_type_check
        def foo(a: 'whatevers') -> {}:
            pass
        th = get_type_hints(foo)
        self.assertEqual(th, {})
    def test_no_type_check_class(self):
        # ...and of the methods of a decorated class.
        @no_type_check
        class C:
            def foo(a: 'whatevers') -> {}:
                pass
        cth = get_type_hints(C.foo)
        self.assertEqual(cth, {})
        ith = get_type_hints(C().foo)
        self.assertEqual(ith, {})
    def test_no_type_check_no_bases(self):
        class C:
            def meth(self, x: int): ...
        @no_type_check
        class D(C):
            c = C
        # verify that @no_type_check never affects bases
        self.assertEqual(get_type_hints(C.meth), {'x': int})
    def test_no_type_check_forward_ref_as_string(self):
        # ClassVar written directly or as a string resolves identically.
        class C:
            foo: typing.ClassVar[int] = 7
        class D:
            foo: ClassVar[int] = 7
        class E:
            foo: 'typing.ClassVar[int]' = 7
        class F:
            foo: 'ClassVar[int]' = 7
        expected_result = {'foo': typing.ClassVar[int]}
        for clazz in [C, D, E, F]:
            self.assertEqual(get_type_hints(clazz), expected_result)
    def test_nested_classvar_fails_forward_ref_check(self):
        # ClassVar nested inside ClassVar is rejected during resolution.
        class E:
            foo: 'typing.ClassVar[typing.ClassVar[int]]' = 7
        class F:
            foo: ClassVar['ClassVar[int]'] = 7
        for clazz in [E, F]:
            with self.assertRaises(TypeError):
                get_type_hints(clazz)
    def test_meta_no_type_check(self):
        # no_type_check_decorator builds decorators that suppress hints
        # on whatever they decorate (functions and classes alike).
        @no_type_check_decorator
        def magic_decorator(func):
            return func
        self.assertEqual(magic_decorator.__name__, 'magic_decorator')
        @magic_decorator
        def foo(a: 'whatevers') -> {}:
            pass
        @magic_decorator
        class C:
            def foo(a: 'whatevers') -> {}:
                pass
        self.assertEqual(foo.__name__, 'foo')
        th = get_type_hints(foo)
        self.assertEqual(th, {})
        cth = get_type_hints(C.foo)
        self.assertEqual(cth, {})
        ith = get_type_hints(C().foo)
        self.assertEqual(ith, {})
    def test_default_globals(self):
        # With no globalns/localns given, get_type_hints() falls back to
        # the function's own __globals__ (the exec namespace here).
        code = ("class C:\n"
                "    def foo(self, a: 'C') -> 'D': pass\n"
                "class D:\n"
                "    def bar(self, b: 'D') -> C: pass\n"
                )
        ns = {}
        exec(code, ns)
        hints = get_type_hints(ns['C'].foo)
        self.assertEqual(hints, {'a': ns['C'], 'return': ns['D']})
class OverloadTests(BaseTestCase):
    """Tests for the @typing.overload decorator."""
    def test_overload_fails(self):
        # Calling an @overload stub directly raises RuntimeError.
        from typing import overload
        with self.assertRaises(RuntimeError):
            @overload
            def blah():
                pass
            blah()
    def test_overload_succeeds(self):
        # A later real definition shadows the stub, so the call succeeds.
        from typing import overload
        @overload
        def blah():
            pass
        def blah():
            pass
        blah()
# Helper classes exercising the async protocols.  They are defined inside a
# string and exec()ed so the asyncio import can be guarded by the
# try/except ImportError below.
ASYNCIO_TESTS = """
import asyncio
T_a = TypeVar('T_a')
class AwaitableWrapper(typing.Awaitable[T_a]):
    def __init__(self, value):
        self.value = value
    def __await__(self) -> typing.Iterator[T_a]:
        yield
        return self.value
class AsyncIteratorWrapper(typing.AsyncIterator[T_a]):
    def __init__(self, value: typing.Iterable[T_a]):
        self.value = value
    def __aiter__(self) -> typing.AsyncIterator[T_a]:
        return self
    @asyncio.coroutine
    def __anext__(self) -> T_a:
        data = yield from self.value
        if data:
            return data
        else:
            raise StopAsyncIteration
class ACM:
    async def __aenter__(self) -> int:
        return 42
    async def __aexit__(self, etype, eval, tb):
        return None
"""
try:
    exec(ASYNCIO_TESTS)
except ImportError:
    ASYNCIO = False # asyncio could not be imported
else:
    ASYNCIO = True
# Definitions needed for features introduced in Python 3.6
from test import ann_module, ann_module2, ann_module3
from typing import AsyncContextManager
# Small annotated classes consumed by the get_type_hints tests below.
class A:
    y: float
class B(A):
    x: ClassVar[Optional['B']] = None  # forward reference inside ClassVar
    y: int
    b: int
class CSub(B):
    z: ClassVar['CSub'] = B()
class G(Generic[T]):
    lst: ClassVar[List[T]] = []  # ClassVar may carry a type variable
class NoneAndForward:
    parent: 'NoneAndForward'  # forward reference to the class itself
    meaning: None  # a bare None annotation resolves to type(None)
# NamedTuple fixtures used by the NamedTuple tests.
class CoolEmployee(NamedTuple):
    name: str
    cool: int
class CoolEmployeeWithDefault(NamedTuple):
    name: str
    cool: int = 0  # field with a default value
class XMeth(NamedTuple):
    """NamedTuple carrying one int field plus an ordinary method."""
    x: int
    def double(self):
        """Return twice the stored value."""
        return self.x + self.x
class XRepr(NamedTuple):
    """NamedTuple overriding __str__ and __add__; used by the tests."""
    x: int
    y: int = 1
    def __str__(self):
        """Render the pair as 'x -> y'."""
        return '{} -> {}'.format(self.x, self.y)
    def __add__(self, other):
        """Addition is stubbed to always yield 0."""
        return 0
# Base class from another module plus string annotations; exercises
# get_type_hints() name resolution across modules.
class HasForeignBaseClass(mod_generics_cache.A):
    some_xrepr: 'XRepr'
    other_a: 'mod_generics_cache.A'
async def g_with(am: AsyncContextManager[int]):
    # Enter the async context manager and return the value it yields.
    x: int
    async with am as x:
        return x
# Drive the coroutine once; ACM.__aenter__ returns 42, which arrives as
# the StopIteration value when the coroutine finishes.
try:
    g_with(ACM()).send(None)
except StopIteration as e:
    assert e.args[0] == 42
gth = get_type_hints  # short alias used by the tests below
class GetTypeHintTests(BaseTestCase):
    """Tests for get_type_hints() on modules, classes and functions."""
    def test_get_type_hints_from_various_objects(self):
        # For invalid objects should fail with TypeError (not AttributeError etc).
        with self.assertRaises(TypeError):
            gth(123)
        with self.assertRaises(TypeError):
            gth('abc')
        with self.assertRaises(TypeError):
            gth(None)
    def test_get_type_hints_modules(self):
        ann_module_type_hints = {1: 2, 'f': Tuple[int, int], 'x': int, 'y': str}
        self.assertEqual(gth(ann_module), ann_module_type_hints)
        self.assertEqual(gth(ann_module2), {})
        self.assertEqual(gth(ann_module3), {})
    @expectedFailure
    def test_get_type_hints_modules_forwardref(self):
        # FIXME: This currently exposes a bug in typing. Cached forward references
        # don't account for the case where there are multiple types of the same
        # name coming from different modules in the same program.
        mgc_hints = {'default_a': Optional[mod_generics_cache.A],
                     'default_b': Optional[mod_generics_cache.B]}
        self.assertEqual(gth(mod_generics_cache), mgc_hints)
    def test_get_type_hints_classes(self):
        self.assertEqual(gth(ann_module.C), # gth will find the right globalns
                         {'y': Optional[ann_module.C]})
        self.assertIsInstance(gth(ann_module.j_class), dict)
        self.assertEqual(gth(ann_module.M), {'123': 123, 'o': type})
        self.assertEqual(gth(ann_module.D),
                         {'j': str, 'k': str, 'y': Optional[ann_module.C]})
        self.assertEqual(gth(ann_module.Y), {'z': int})
        self.assertEqual(gth(ann_module.h_class),
                         {'y': Optional[ann_module.C]})
        self.assertEqual(gth(ann_module.S), {'x': str, 'y': str})
        self.assertEqual(gth(ann_module.foo), {'x': int})
        self.assertEqual(gth(NoneAndForward),
                         {'parent': NoneAndForward, 'meaning': type(None)})
        self.assertEqual(gth(HasForeignBaseClass),
                         {'some_xrepr': XRepr, 'other_a': mod_generics_cache.A,
                          'some_b': mod_generics_cache.B})
        self.assertEqual(gth(XRepr.__new__),
                         {'x': int, 'y': int})
        self.assertEqual(gth(mod_generics_cache.B),
                         {'my_inner_a1': mod_generics_cache.B.A,
                          'my_inner_a2': mod_generics_cache.B.A,
                          'my_outer_a': mod_generics_cache.A})
    def test_respect_no_type_check(self):
        # @no_type_check marks the class and its nested definitions...
        @no_type_check
        class NoTpCheck:
            class Inn:
                def __init__(self, x: 'not a type'): ...
        self.assertTrue(NoTpCheck.__no_type_check__)
        self.assertTrue(NoTpCheck.Inn.__init__.__no_type_check__)
        self.assertEqual(gth(ann_module2.NTC.meth), {})
        class ABase(Generic[T]):
            def meth(x: int): ...
        @no_type_check
        class Der(ABase): ...
        # verify that @no_type_check never affects bases
        self.assertEqual(gth(ABase.meth), {'x': int})
    def test_get_type_hints_for_builtins(self):
        # Should not fail for built-in classes and functions.
        self.assertEqual(gth(int), {})
        self.assertEqual(gth(type), {})
        self.assertEqual(gth(dir), {})
        self.assertEqual(gth(len), {})
        self.assertEqual(gth(object.__str__), {})
        self.assertEqual(gth(object().__str__), {})
        self.assertEqual(gth(str.join), {})
    def test_previous_behavior(self):
        # String annotations injected after definition are still evaluated.
        def testf(x, y): ...
        testf.__annotations__['x'] = 'int'
        self.assertEqual(gth(testf), {'x': int})
        def testg(x: None): ...
        self.assertEqual(gth(testg), {'x': type(None)})
    def test_get_type_hints_for_object_with_annotations(self):
        # Arbitrary objects carrying an __annotations__ dict are supported.
        class A: ...
        class B: ...
        b = B()
        b.__annotations__ = {'x': 'A'}
        self.assertEqual(gth(b, locals()), {'x': A})
    def test_get_type_hints_ClassVar(self):
        # ClassVar annotations are preserved (not unwrapped) in the hints.
        self.assertEqual(gth(ann_module2.CV, ann_module2.__dict__),
                         {'var': typing.ClassVar[ann_module2.CV]})
        self.assertEqual(gth(B, globals()),
                         {'y': int, 'x': ClassVar[Optional[B]], 'b': int})
        self.assertEqual(gth(CSub, globals()),
                         {'z': ClassVar[CSub], 'y': int, 'b': int,
                          'x': ClassVar[Optional[B]]})
        self.assertEqual(gth(G), {'lst': ClassVar[List[T]]})
class CollectionsAbcTests(BaseTestCase):
    """Check that the typing aliases interoperate with collections.abc."""
    def test_hashable(self):
        self.assertIsInstance(42, typing.Hashable)
        self.assertNotIsInstance([], typing.Hashable)
    def test_iterable(self):
        self.assertIsInstance([], typing.Iterable)
        # Due to ABC caching, the second time takes a separate code
        # path and could fail. So call this a few times.
        self.assertIsInstance([], typing.Iterable)
        self.assertIsInstance([], typing.Iterable)
        self.assertNotIsInstance(42, typing.Iterable)
        # Just in case, also test issubclass() a few times.
        self.assertIsSubclass(list, typing.Iterable)
        self.assertIsSubclass(list, typing.Iterable)
    def test_iterator(self):
        it = iter([])
        self.assertIsInstance(it, typing.Iterator)
        self.assertNotIsInstance(42, typing.Iterator)
    @skipUnless(ASYNCIO, 'Python 3.5 and multithreading required')
    def test_awaitable(self):
        # A coroutine object is Awaitable; the coroutine function is not.
        ns = {}
        exec(
            "async def foo() -> typing.Awaitable[int]:\n"
            "    return await AwaitableWrapper(42)\n",
            globals(), ns)
        foo = ns['foo']
        g = foo()
        self.assertIsInstance(g, typing.Awaitable)
        self.assertNotIsInstance(foo, typing.Awaitable)
        g.send(None) # Run foo() till completion, to avoid warning.
    @skipUnless(ASYNCIO, 'Python 3.5 and multithreading required')
    def test_coroutine(self):
        ns = {}
        exec(
            "async def foo():\n"
            "    return\n",
            globals(), ns)
        foo = ns['foo']
        g = foo()
        self.assertIsInstance(g, typing.Coroutine)
        # isinstance() with a subscripted Coroutine is not supported.
        with self.assertRaises(TypeError):
            isinstance(g, typing.Coroutine[int])
        self.assertNotIsInstance(foo, typing.Coroutine)
        try:
            g.send(None)
        except StopIteration:
            pass
    @skipUnless(ASYNCIO, 'Python 3.5 and multithreading required')
    def test_async_iterable(self):
        base_it = range(10) # type: Iterator[int]
        it = AsyncIteratorWrapper(base_it)
        self.assertIsInstance(it, typing.AsyncIterable)
        # Check twice for the ABC-caching code path as well.
        self.assertIsInstance(it, typing.AsyncIterable)
        self.assertNotIsInstance(42, typing.AsyncIterable)
    @skipUnless(ASYNCIO, 'Python 3.5 and multithreading required')
    def test_async_iterator(self):
        base_it = range(10) # type: Iterator[int]
        it = AsyncIteratorWrapper(base_it)
        self.assertIsInstance(it, typing.AsyncIterator)
        self.assertNotIsInstance(42, typing.AsyncIterator)
    # --- plain isinstance()/issubclass() checks against the ABC aliases ---
    def test_sized(self):
        self.assertIsInstance([], typing.Sized)
        self.assertNotIsInstance(42, typing.Sized)
    def test_container(self):
        self.assertIsInstance([], typing.Container)
        self.assertNotIsInstance(42, typing.Container)
    def test_collection(self):
        # Guarded: typing.Collection may be absent in this version.
        if hasattr(typing, 'Collection'):
            self.assertIsInstance(tuple(), typing.Collection)
            self.assertIsInstance(frozenset(), typing.Collection)
            self.assertIsSubclass(dict, typing.Collection)
            self.assertNotIsInstance(42, typing.Collection)
    def test_abstractset(self):
        self.assertIsInstance(set(), typing.AbstractSet)
        self.assertNotIsInstance(42, typing.AbstractSet)
    def test_mutableset(self):
        self.assertIsInstance(set(), typing.MutableSet)
        self.assertNotIsInstance(frozenset(), typing.MutableSet)
    def test_mapping(self):
        self.assertIsInstance({}, typing.Mapping)
        self.assertNotIsInstance(42, typing.Mapping)
    def test_mutablemapping(self):
        self.assertIsInstance({}, typing.MutableMapping)
        self.assertNotIsInstance(42, typing.MutableMapping)
    def test_sequence(self):
        self.assertIsInstance([], typing.Sequence)
        self.assertNotIsInstance(42, typing.Sequence)
    def test_mutablesequence(self):
        self.assertIsInstance([], typing.MutableSequence)
        self.assertNotIsInstance((), typing.MutableSequence)
    def test_bytestring(self):
        # Both bytes and bytearray satisfy ByteString.
        self.assertIsInstance(b'', typing.ByteString)
        self.assertIsInstance(bytearray(b''), typing.ByteString)
    def test_list(self):
        self.assertIsSubclass(list, typing.List)
    def test_deque(self):
        self.assertIsSubclass(collections.deque, typing.Deque)
        class MyDeque(typing.Deque[int]): ...
        self.assertIsInstance(MyDeque(), collections.deque)
    def test_counter(self):
        self.assertIsSubclass(collections.Counter, typing.Counter)
    def test_set(self):
        self.assertIsSubclass(set, typing.Set)
        self.assertNotIsSubclass(frozenset, typing.Set)
    def test_frozenset(self):
        self.assertIsSubclass(frozenset, typing.FrozenSet)
        self.assertNotIsSubclass(set, typing.FrozenSet)
    def test_dict(self):
        self.assertIsSubclass(dict, typing.Dict)
    def test_no_list_instantiation(self):
        # typing.List cannot be instantiated, bare or parameterized...
        with self.assertRaises(TypeError):
            typing.List()
        with self.assertRaises(TypeError):
            typing.List[T]()
        with self.assertRaises(TypeError):
            typing.List[int]()
    def test_list_subclass(self):
        # ...but a user subclass of List[int] is an ordinary list subclass.
        class MyList(typing.List[int]):
            pass
        a = MyList()
        self.assertIsInstance(a, MyList)
        self.assertIsInstance(a, typing.Sequence)
        self.assertIsSubclass(MyList, list)
        self.assertNotIsSubclass(list, MyList)
    def test_no_dict_instantiation(self):
        # Same rule for typing.Dict.
        with self.assertRaises(TypeError):
            typing.Dict()
        with self.assertRaises(TypeError):
            typing.Dict[KT, VT]()
        with self.assertRaises(TypeError):
            typing.Dict[str, int]()
    def test_dict_subclass(self):
        class MyDict(typing.Dict[str, int]):
            pass
        d = MyDict()
        self.assertIsInstance(d, MyDict)
        self.assertIsInstance(d, typing.MutableMapping)
        self.assertIsSubclass(MyDict, dict)
        self.assertNotIsSubclass(dict, MyDict)
    def test_defaultdict_instantiation(self):
        # Unlike List/Dict, the collections aliases ARE instantiable and
        # return instances of the underlying collections class.
        self.assertIs(type(typing.DefaultDict()), collections.defaultdict)
        self.assertIs(type(typing.DefaultDict[KT, VT]()), collections.defaultdict)
        self.assertIs(type(typing.DefaultDict[str, int]()), collections.defaultdict)
    def test_defaultdict_subclass(self):
        class MyDefDict(typing.DefaultDict[str, int]):
            pass
        dd = MyDefDict()
        self.assertIsInstance(dd, MyDefDict)
        self.assertIsSubclass(MyDefDict, collections.defaultdict)
        self.assertNotIsSubclass(collections.defaultdict, MyDefDict)
    def test_ordereddict_instantiation(self):
        self.assertIs(type(typing.OrderedDict()), collections.OrderedDict)
        self.assertIs(type(typing.OrderedDict[KT, VT]()), collections.OrderedDict)
        self.assertIs(type(typing.OrderedDict[str, int]()), collections.OrderedDict)
    def test_ordereddict_subclass(self):
        class MyOrdDict(typing.OrderedDict[str, int]):
            pass
        od = MyOrdDict()
        self.assertIsInstance(od, MyOrdDict)
        self.assertIsSubclass(MyOrdDict, collections.OrderedDict)
        self.assertNotIsSubclass(collections.OrderedDict, MyOrdDict)
    @skipUnless(sys.version_info >= (3, 3), 'ChainMap was added in 3.3')
    def test_chainmap_instantiation(self):
        self.assertIs(type(typing.ChainMap()), collections.ChainMap)
        self.assertIs(type(typing.ChainMap[KT, VT]()), collections.ChainMap)
        self.assertIs(type(typing.ChainMap[str, int]()), collections.ChainMap)
        # A user subclass instantiates to itself, not to collections.ChainMap.
        class CM(typing.ChainMap[KT, VT]): ...
        self.assertIs(type(CM[int, str]()), CM)
    @skipUnless(sys.version_info >= (3, 3), 'ChainMap was added in 3.3')
    def test_chainmap_subclass(self):
        class MyChainMap(typing.ChainMap[str, int]):
            pass
        cm = MyChainMap()
        self.assertIsInstance(cm, MyChainMap)
        self.assertIsSubclass(MyChainMap, collections.ChainMap)
        self.assertNotIsSubclass(collections.ChainMap, MyChainMap)
    def test_deque_instantiation(self):
        self.assertIs(type(typing.Deque()), collections.deque)
        self.assertIs(type(typing.Deque[T]()), collections.deque)
        self.assertIs(type(typing.Deque[int]()), collections.deque)
        class D(typing.Deque[T]): ...
        self.assertIs(type(D[int]()), D)
    def test_counter_instantiation(self):
        self.assertIs(type(typing.Counter()), collections.Counter)
        self.assertIs(type(typing.Counter[T]()), collections.Counter)
        self.assertIs(type(typing.Counter[int]()), collections.Counter)
        class C(typing.Counter[T]): ...
        self.assertIs(type(C[int]()), C)
    def test_counter_subclass_instantiation(self):
        class MyCounter(typing.Counter[int]):
            pass
        d = MyCounter()
        self.assertIsInstance(d, MyCounter)
        self.assertIsInstance(d, typing.Counter)
        self.assertIsInstance(d, collections.Counter)
    # Set, FrozenSet and Tuple follow the List/Dict pattern: the aliases
    # themselves cannot be instantiated, but user subclasses can.
    def test_no_set_instantiation(self):
        with self.assertRaises(TypeError):
            typing.Set()
        with self.assertRaises(TypeError):
            typing.Set[T]()
        with self.assertRaises(TypeError):
            typing.Set[int]()
    def test_set_subclass_instantiation(self):
        class MySet(typing.Set[int]):
            pass
        d = MySet()
        self.assertIsInstance(d, MySet)
    def test_no_frozenset_instantiation(self):
        with self.assertRaises(TypeError):
            typing.FrozenSet()
        with self.assertRaises(TypeError):
            typing.FrozenSet[T]()
        with self.assertRaises(TypeError):
            typing.FrozenSet[int]()
    def test_frozenset_subclass_instantiation(self):
        class MyFrozenSet(typing.FrozenSet[int]):
            pass
        d = MyFrozenSet()
        self.assertIsInstance(d, MyFrozenSet)
    def test_no_tuple_instantiation(self):
        with self.assertRaises(TypeError):
            Tuple()
        with self.assertRaises(TypeError):
            Tuple[T]()
        with self.assertRaises(TypeError):
            Tuple[int]()
def test_generator(self):
def foo():
yield 42
g = foo()
self.assertIsSubclass(type(g), typing.Generator)
    def test_no_generator_instantiation(self):
        """typing.Generator itself cannot be instantiated."""
        with self.assertRaises(TypeError):
            typing.Generator()
        with self.assertRaises(TypeError):
            typing.Generator[T, T, T]()
        with self.assertRaises(TypeError):
            typing.Generator[int, int, int]()
    def test_async_generator(self):
        """The concrete async-generator type is a subclass of typing.AsyncGenerator."""
        ns = {}
        # The async generator is compiled at runtime via exec().
        exec("async def f():\n"
             "    yield 42\n", globals(), ns)
        g = ns['f']()
        self.assertIsSubclass(type(g), typing.AsyncGenerator)
    def test_no_async_generator_instantiation(self):
        """typing.AsyncGenerator itself cannot be instantiated."""
        with self.assertRaises(TypeError):
            typing.AsyncGenerator()
        with self.assertRaises(TypeError):
            typing.AsyncGenerator[T, T]()
        with self.assertRaises(TypeError):
            typing.AsyncGenerator[int, int]()
    def test_subclassing(self):
        """Subclasses of bare and parameterized typing ABCs work like their
        collections.abc counterparts: abstract until all abstract methods are
        supplied, and registered with both the typing and collections ABCs."""
        class MMA(typing.MutableMapping):
            pass
        with self.assertRaises(TypeError):  # It's abstract
            MMA()
        # Concrete subclass: supplies all five abstract MutableMapping methods.
        class MMC(MMA):
            def __getitem__(self, k):
                return None
            def __setitem__(self, k, v):
                pass
            def __delitem__(self, k):
                pass
            def __iter__(self):
                return iter(())
            def __len__(self):
                return 0
        self.assertEqual(len(MMC()), 0)
        # Mixin methods (e.g. update) are inherited from the ABC.
        assert callable(MMC.update)
        self.assertIsInstance(MMC(), typing.Mapping)
        # Same again, but subclassing the parameterized generic.
        class MMB(typing.MutableMapping[KT, VT]):
            def __getitem__(self, k):
                return None
            def __setitem__(self, k, v):
                pass
            def __delitem__(self, k):
                pass
            def __iter__(self):
                return iter(())
            def __len__(self):
                return 0
        self.assertEqual(len(MMB()), 0)
        self.assertEqual(len(MMB[str, str]()), 0)
        self.assertEqual(len(MMB[KT, VT]()), 0)
        self.assertNotIsSubclass(dict, MMA)
        self.assertNotIsSubclass(dict, MMB)
        self.assertIsSubclass(MMA, typing.Mapping)
        self.assertIsSubclass(MMB, typing.Mapping)
        self.assertIsSubclass(MMC, typing.Mapping)
        self.assertIsInstance(MMB[KT, VT](), typing.Mapping)
        self.assertIsInstance(MMB[KT, VT](), collections.abc.Mapping)
        self.assertIsSubclass(MMA, collections.abc.Mapping)
        self.assertIsSubclass(MMB, collections.abc.Mapping)
        self.assertIsSubclass(MMC, collections.abc.Mapping)
        # issubclass() is rejected on a subscripted generic.
        with self.assertRaises(TypeError):
            issubclass(MMB[str, str], typing.Mapping)
        self.assertIsSubclass(MMC, MMA)
        class I(typing.Iterable): ...
        self.assertNotIsSubclass(list, I)
        class G(typing.Generator[int, int, int]): ...
        def g(): yield 0
        self.assertIsSubclass(G, typing.Generator)
        self.assertIsSubclass(G, typing.Iterable)
        self.assertIsSubclass(G, collections.abc.Generator)
        self.assertIsSubclass(G, collections.abc.Iterable)
        # A plain generator function's type is not a subclass of G.
        self.assertNotIsSubclass(type(g), G)
    def test_subclassing_async_generator(self):
        """A typing.AsyncGenerator[...] subclass registers with both the typing
        and collections.abc async ABCs; a plain async generator does not become
        a subclass/instance of the user class."""
        class G(typing.AsyncGenerator[int, int]):
            def asend(self, value):
                pass
            def athrow(self, typ, val=None, tb=None):
                pass
        ns = {}
        # The async generator is compiled at runtime via exec().
        exec('async def g(): yield 0', globals(), ns)
        g = ns['g']
        self.assertIsSubclass(G, typing.AsyncGenerator)
        self.assertIsSubclass(G, typing.AsyncIterable)
        self.assertIsSubclass(G, collections.abc.AsyncGenerator)
        self.assertIsSubclass(G, collections.abc.AsyncIterable)
        self.assertNotIsSubclass(type(g), G)
        instance = G()
        self.assertIsInstance(instance, typing.AsyncGenerator)
        self.assertIsInstance(instance, typing.AsyncIterable)
        self.assertIsInstance(instance, collections.abc.AsyncGenerator)
        self.assertIsInstance(instance, collections.abc.AsyncIterable)
        self.assertNotIsInstance(type(g), G)
        self.assertNotIsInstance(g, G)
    def test_subclassing_subclasshook(self):
        """A user-defined __subclasshook__ on a typing ABC subclass is honored
        and inherited by further subclasses."""
        class Base(typing.Iterable):
            @classmethod
            def __subclasshook__(cls, other):
                # Accept any class literally named 'Foo'.
                if other.__name__ == 'Foo':
                    return True
                else:
                    return False
        class C(Base): ...
        class Foo: ...
        class Bar: ...
        self.assertIsSubclass(Foo, Base)
        self.assertIsSubclass(Foo, C)
        self.assertNotIsSubclass(Bar, C)
    def test_subclassing_register(self):
        """ABC .register() works on typing ABC subclasses, and registration
        with a collections.abc ABC is visible through the typing alias."""
        class A(typing.Container): ...
        class B(A): ...
        class C: ...
        A.register(C)
        self.assertIsSubclass(C, A)
        # Registration with A does not make C a subclass of A's subclass B.
        self.assertNotIsSubclass(C, B)
        class D: ...
        B.register(D)
        self.assertIsSubclass(D, A)
        self.assertIsSubclass(D, B)
        class M(): ...
        collections.abc.MutableMapping.register(M)
        self.assertIsSubclass(M, typing.Mapping)
    def test_collections_as_base(self):
        """Classes derived from (or registered with) collections.abc ABCs count
        as subclasses of the corresponding typing aliases."""
        class M(collections.abc.Mapping): ...
        self.assertIsSubclass(M, typing.Mapping)
        self.assertIsSubclass(M, typing.Iterable)
        class S(collections.abc.MutableSequence): ...
        self.assertIsSubclass(S, typing.MutableSequence)
        self.assertIsSubclass(S, typing.Iterable)
        class I(collections.abc.Iterable): ...
        self.assertIsSubclass(I, typing.Iterable)
        class A(collections.abc.Mapping, metaclass=abc.ABCMeta): ...
        class B: ...
        A.register(B)
        self.assertIsSubclass(B, typing.Mapping)
class OtherABCTests(BaseTestCase):
    """Tests for the (async) context-manager ABCs exported by typing."""
    def test_contextmanager(self):
        @contextlib.contextmanager
        def manager():
            yield 42
        cm = manager()
        self.assertIsInstance(cm, typing.ContextManager)
        self.assertNotIsInstance(42, typing.ContextManager)
    @skipUnless(ASYNCIO, 'Python 3.5 required')
    def test_async_contextmanager(self):
        class NotACM:
            pass
        self.assertIsInstance(ACM(), typing.AsyncContextManager)
        self.assertNotIsInstance(NotACM(), typing.AsyncContextManager)
        # A synchronous context manager is not an AsyncContextManager.
        @contextlib.contextmanager
        def manager():
            yield 42
        cm = manager()
        self.assertNotIsInstance(cm, typing.AsyncContextManager)
        self.assertEqual(typing.AsyncContextManager[int].__args__, (int,))
        # isinstance() on a subscripted alias is rejected.
        with self.assertRaises(TypeError):
            isinstance(42, typing.AsyncContextManager[int])
        # AsyncContextManager takes exactly one type parameter.
        with self.assertRaises(TypeError):
            typing.AsyncContextManager[int, str]
class TypeTests(BaseTestCase):
    """Tests for typing.Type[...] used in annotations."""
    def test_type_basic(self):
        class User: pass
        class BasicUser(User): pass
        class ProUser(User): pass
        # Type[User] annotates a parameter that receives the class itself.
        def new_user(user_class: Type[User]) -> User:
            return user_class()
        new_user(BasicUser)
    def test_type_typevar(self):
        class User: pass
        class BasicUser(User): pass
        class ProUser(User): pass
        U = TypeVar('U', bound=User)
        def new_user(user_class: Type[U]) -> U:
            return user_class()
        new_user(BasicUser)
    def test_type_optional(self):
        A = Optional[Type[BaseException]]
        def foo(a: A) -> Optional[BaseException]:
            if a is None:
                return None
            else:
                return a()
        assert isinstance(foo(KeyboardInterrupt), KeyboardInterrupt)
        assert foo(None) is None
class NewTypeTests(BaseTestCase):
    """Tests for typing.NewType."""
    def test_basic(self):
        UserId = NewType('UserId', int)
        UserName = NewType('UserName', str)
        # NewType callables return their argument unchanged at runtime.
        self.assertIsInstance(UserId(5), int)
        self.assertIsInstance(UserName('Joe'), str)
        self.assertEqual(UserId(5) + 1, 6)
    def test_errors(self):
        UserId = NewType('UserId', int)
        UserName = NewType('UserName', str)
        # A NewType cannot be used with issubclass() or as a base class.
        with self.assertRaises(TypeError):
            issubclass(UserId, int)
        with self.assertRaises(TypeError):
            class D(UserName):
                pass
class NamedTupleTests(BaseTestCase):
    """Tests for typing.NamedTuple: functional, class-annotation and keyword
    creation forms, defaults, methods and pickling."""
    def test_basics(self):
        Emp = NamedTuple('Emp', [('name', str), ('id', int)])
        self.assertIsSubclass(Emp, tuple)
        joe = Emp('Joe', 42)
        jim = Emp(name='Jim', id=1)
        self.assertIsInstance(joe, Emp)
        self.assertIsInstance(joe, tuple)
        self.assertEqual(joe.name, 'Joe')
        self.assertEqual(joe.id, 42)
        self.assertEqual(jim.name, 'Jim')
        self.assertEqual(jim.id, 1)
        self.assertEqual(Emp.__name__, 'Emp')
        self.assertEqual(Emp._fields, ('name', 'id'))
        self.assertEqual(Emp.__annotations__,
                         collections.OrderedDict([('name', str), ('id', int)]))
        # _field_types is kept as an alias of __annotations__.
        self.assertIs(Emp._field_types, Emp.__annotations__)
    def test_namedtuple_pyversion(self):
        # Keyword and class-annotation forms require Python >= 3.6.
        if sys.version_info[:2] < (3, 6):
            with self.assertRaises(TypeError):
                NamedTuple('Name', one=int, other=str)
            with self.assertRaises(TypeError):
                class NotYet(NamedTuple):
                    whatever = 0
    def test_annotation_usage(self):
        tim = CoolEmployee('Tim', 9000)
        self.assertIsInstance(tim, CoolEmployee)
        self.assertIsInstance(tim, tuple)
        self.assertEqual(tim.name, 'Tim')
        self.assertEqual(tim.cool, 9000)
        self.assertEqual(CoolEmployee.__name__, 'CoolEmployee')
        self.assertEqual(CoolEmployee._fields, ('name', 'cool'))
        self.assertEqual(CoolEmployee.__annotations__,
                         collections.OrderedDict(name=str, cool=int))
        self.assertIs(CoolEmployee._field_types, CoolEmployee.__annotations__)
    def test_annotation_usage_with_default(self):
        jelle = CoolEmployeeWithDefault('Jelle')
        self.assertIsInstance(jelle, CoolEmployeeWithDefault)
        self.assertIsInstance(jelle, tuple)
        self.assertEqual(jelle.name, 'Jelle')
        self.assertEqual(jelle.cool, 0)
        cooler_employee = CoolEmployeeWithDefault('Sjoerd', 1)
        self.assertEqual(cooler_employee.cool, 1)
        self.assertEqual(CoolEmployeeWithDefault.__name__, 'CoolEmployeeWithDefault')
        self.assertEqual(CoolEmployeeWithDefault._fields, ('name', 'cool'))
        self.assertEqual(CoolEmployeeWithDefault._field_types, dict(name=str, cool=int))
        self.assertEqual(CoolEmployeeWithDefault._field_defaults, dict(cool=0))
        # A field without a default may not follow one with a default.
        with self.assertRaises(TypeError):
            exec("""
class NonDefaultAfterDefault(NamedTuple):
    x: int = 3
    y: int
""")
    def test_annotation_usage_with_methods(self):
        self.assertEqual(XMeth(1).double(), 2)
        self.assertEqual(XMeth(42).x, XMeth(42)[0])
        self.assertEqual(str(XRepr(42)), '42 -> 1')
        self.assertEqual(XRepr(1, 2) + XRepr(3), 0)
        # Overriding reserved namedtuple attributes is rejected.
        with self.assertRaises(AttributeError):
            exec("""
class XMethBad(NamedTuple):
    x: int
    def _fields(self):
        return 'no chance for this'
""")
        with self.assertRaises(AttributeError):
            exec("""
class XMethBad2(NamedTuple):
    x: int
    def _source(self):
        return 'no chance for this as well'
""")
    def test_namedtuple_keyword_usage(self):
        LocalEmployee = NamedTuple("LocalEmployee", name=str, age=int)
        nick = LocalEmployee('Nick', 25)
        self.assertIsInstance(nick, tuple)
        self.assertEqual(nick.name, 'Nick')
        self.assertEqual(LocalEmployee.__name__, 'LocalEmployee')
        self.assertEqual(LocalEmployee._fields, ('name', 'age'))
        self.assertEqual(LocalEmployee.__annotations__, dict(name=str, age=int))
        self.assertIs(LocalEmployee._field_types, LocalEmployee.__annotations__)
        # Mixing the list form and keyword fields is rejected.
        with self.assertRaises(TypeError):
            NamedTuple('Name', [('x', int)], y=str)
        with self.assertRaises(TypeError):
            NamedTuple('Name', x=1, y='a')
    def test_pickle(self):
        global Emp  # pickle wants to reference the class by name
        Emp = NamedTuple('Emp', [('name', str), ('id', int)])
        jane = Emp('jane', 37)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            z = pickle.dumps(jane, proto)
            jane2 = pickle.loads(z)
            self.assertEqual(jane2, jane)
class IOTests(BaseTestCase):
    """Tests for the IO generic aliases and the typing.io pseudo-submodule."""
    def test_io(self):
        # Bare IO is still parameterized by AnyStr.
        def stuff(a: IO) -> AnyStr:
            return a.readline()
        a = stuff.__annotations__['a']
        self.assertEqual(a.__parameters__, (AnyStr,))
    def test_textio(self):
        # TextIO is fully concrete: no free parameters.
        def stuff(a: TextIO) -> str:
            return a.readline()
        a = stuff.__annotations__['a']
        self.assertEqual(a.__parameters__, ())
    def test_binaryio(self):
        # BinaryIO is fully concrete: no free parameters.
        def stuff(a: BinaryIO) -> bytes:
            return a.readline()
        a = stuff.__annotations__['a']
        self.assertEqual(a.__parameters__, ())
    def test_io_submodule(self):
        from typing.io import IO, TextIO, BinaryIO, __all__, __name__
        self.assertIs(IO, typing.IO)
        self.assertIs(TextIO, typing.TextIO)
        self.assertIs(BinaryIO, typing.BinaryIO)
        self.assertEqual(set(__all__), set(['IO', 'TextIO', 'BinaryIO']))
        self.assertEqual(__name__, 'typing.io')
class RETests(BaseTestCase):
    # Much of this is really testing _TypeAlias.
    def test_basics(self):
        pat = re.compile('[a-z]+', re.I)
        self.assertIsSubclass(pat.__class__, Pattern)
        self.assertIsSubclass(type(pat), Pattern)
        self.assertIsInstance(pat, Pattern)
        mat = pat.search('12345abcde.....')
        self.assertIsSubclass(mat.__class__, Match)
        self.assertIsSubclass(type(mat), Match)
        self.assertIsInstance(mat, Match)
        # these should just work
        Pattern[Union[str, bytes]]
        Match[Union[bytes, str]]
    def test_alias_equality(self):
        self.assertEqual(Pattern[str], Pattern[str])
        self.assertNotEqual(Pattern[str], Pattern[bytes])
        self.assertNotEqual(Pattern[str], Match[str])
        self.assertNotEqual(Pattern[str], str)
    def test_errors(self):
        m = Match[Union[str, bytes]]
        with self.assertRaises(TypeError):
            # An already-subscripted alias cannot be subscripted again.
            m[str]
        with self.assertRaises(TypeError):
            # We don't support isinstance().
            isinstance(42, Pattern[str])
        with self.assertRaises(TypeError):
            # We don't support issubclass().
            issubclass(Pattern[bytes], Pattern[str])
    def test_repr(self):
        self.assertEqual(repr(Pattern), 'typing.Pattern')
        self.assertEqual(repr(Pattern[str]), 'typing.Pattern[str]')
        self.assertEqual(repr(Pattern[bytes]), 'typing.Pattern[bytes]')
        self.assertEqual(repr(Match), 'typing.Match')
        self.assertEqual(repr(Match[str]), 'typing.Match[str]')
        self.assertEqual(repr(Match[bytes]), 'typing.Match[bytes]')
    def test_re_submodule(self):
        from typing.re import Match, Pattern, __all__, __name__
        self.assertIs(Match, typing.Match)
        self.assertIs(Pattern, typing.Pattern)
        self.assertEqual(set(__all__), set(['Match', 'Pattern']))
        self.assertEqual(__name__, 'typing.re')
    def test_cannot_subclass(self):
        with self.assertRaises(TypeError) as ex:
            class A(typing.Match):
                pass
        self.assertEqual(str(ex.exception),
                         "type 're.Match' is not an acceptable base type")
class AllTests(BaseTestCase):
    """Tests for __all__."""
    def test_all(self):
        from typing import __all__ as a
        # Just spot-check the first and last of every category.
        self.assertIn('AbstractSet', a)
        self.assertIn('ValuesView', a)
        self.assertIn('cast', a)
        self.assertIn('overload', a)
        # ContextManager is only exported when the stdlib ABC exists.
        if hasattr(contextlib, 'AbstractContextManager'):
            self.assertIn('ContextManager', a)
        # Check that io and re are not exported.
        self.assertNotIn('io', a)
        self.assertNotIn('re', a)
        # Spot-check that stdlib modules aren't exported.
        self.assertNotIn('os', a)
        self.assertNotIn('sys', a)
        # Check that Text is defined.
        self.assertIn('Text', a)
        # Check previously missing classes.
        self.assertIn('SupportsBytes', a)
        self.assertIn('SupportsComplex', a)
if __name__ == '__main__':
    # Run the full test suite when executed as a script.
    main()
| FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/test/test_typing.py | Python | gpl-2.0 | 87,769 | [
"VisIt"
] | fa4af0ea3157384b5e533b2bd058469e6c49c0a18cd3f07af716d2812dc588cf |
# Copyright (c) 2001 Autonomous Zone Industries
# Copyright (c) 2002 Bryce "Zooko" Wilcox-O'Hearn
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
#
# CVS:
__cvsid = '$Id: EGTPConstants.py,v 1.1 2002/07/17 02:02:47 zooko Exp $'

# Length of RSA public moduli in 8-bit bytes (octets).
# Note that it is allowable for some of the high order bits to be 0.  It is even
# allowable for more than 8 of those bits to be 0 without changing the "length" of the
# modulus.  This is really then the log-base-2 of the size of the space from which we
# randomly choose such values, rather than the "length" of the binary encoding of
# any particular value.
# Floor division keeps this an int on both Python 2 and Python 3; the old
# `1024/8` evaluated to the float 128.0 under Python 3, which breaks any
# caller that uses it as a byte count.
SIZE_OF_MODULAR_VALUES = 1024 // 8

# Your code should probably be written to work with any public exponent.  It is best
# not to use this constant.  But it is here because mesgen uses it currently.
HARDCODED_RSA_PUBLIC_EXPONENT = 3

# Size of ids, secrets, random numbers, salt and other things that must be universally
# unique, in 8-bit bytes (octets).
# You absolutely cannot change this number.  In fact, it is hardcoded all over the
# place, and this variable is useful only as documentation.
SIZE_OF_UNIQS = 20
| zooko/egtp | EGTPConstants.py | Python | agpl-3.0 | 1,277 | [
"VisIt"
] | 3de4f98b7564cb69d015815c3075647c10c922eb981c8af276c28e4b70b0b0d8 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
import pickle
import warnings
import math
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.periodic_table import Element, Specie, DummySpecie, get_el_sp
from pymatgen.core.composition import Composition
from copy import deepcopy
class ElementTestCase(PymatgenTest):
    """Unit tests for pymatgen.core.periodic_table.Element."""
    def test_init(self):
        self.assertEqual("Fe", Element("Fe").symbol, "Fe test failed")
        fictional_symbols = ["D", "T", "Zebra"]
        for sym in fictional_symbols:
            self.assertRaises(ValueError, Element, sym)
        # Test caching: repeated lookups return the same object.
        self.assertEqual(id(Element("Fe")), id(Element("Fe")))
    def test_nan_X(self):
        # Elements without an electronegativity report NaN and sort last.
        self.assertTrue(math.isnan(Element.He.X))
        els = sorted([Element.He, Element.H, Element.F])
        self.assertEqual(els, [Element.H, Element.F, Element.He])
    def test_dict(self):
        fe = Element.Fe
        d = fe.as_dict()
        self.assertEqual(fe, Element.from_dict(d))
    def test_block(self):
        testsets = {"O": "p", "Fe": "d", "Li": "s", "U": "f", "Er": "f",
                    "Lu": "d", "Lr": "d"}
        for k, v in testsets.items():
            self.assertEqual(Element(k).block, v)
    def test_full_electronic_structure(self):
        testsets = {"O": [(1, "s", 2), (2, "s", 2), (2, "p", 4)],
                    "Fe": [(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2),
                           (3, "p", 6), (3, "d", 6), (4, "s", 2)],
                    "Li": [(1, "s", 2), (2, "s", 1)],
                    "U": [(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2),
                          (3, "p", 6), (3, "d", 10), (4, "s", 2), (4, "p", 6),
                          (4, "d", 10), (5, "s", 2), (5, "p", 6), (4, "f", 14),
                          (5, "d", 10), (6, "s", 2), (6, "p", 6), (5, "f", 3),
                          (6, "d", 1), (7, "s", 2)]}
        for k, v in testsets.items():
            self.assertEqual(Element(k).full_electronic_structure, v)
    def test_valence(self):
        testsets = {"O": (1, 4),
                    "Fe": (2, 6),
                    "Li": (0, 1)}
        for k, v in testsets.items():
            self.assertEqual(Element(k).valence, v)
        with self.assertRaises(ValueError):
            Element("U").valence
    def test_term_symbols(self):
        testsets = {"Li": [['2S0.5']],  # s1
                    "C": [['1D2.0'],
                          ['3P0.0', '3P1.0', '3P2.0'],
                          ['1S0.0']],  # p2
                    "Ti": [['1G4.0'],
                           ['3F2.0', '3F3.0', '3F4.0'],
                           ['1D2.0'],
                           ['3P0.0', '3P1.0', '3P2.0'],
                           ['1S0.0']],  # d2
                    "Pr": [['2L7.5', '2L8.5'],
                           ['2K6.5', '2K7.5'],
                           ['4I4.5', '4I5.5', '4I6.5', '4I7.5'],
                           ['2I5.5', '2I6.5'],
                           ['2H4.5', '2H5.5'],
                           ['2H4.5', '2H5.5'],
                           ['4G2.5', '4G3.5', '4G4.5', '4G5.5'],
                           ['2G3.5', '2G4.5'],
                           ['2G3.5', '2G4.5'],
                           ['4F1.5', '4F2.5', '4F3.5', '4F4.5'],
                           ['2F2.5', '2F3.5'],
                           ['2F2.5', '2F3.5'],
                           ['4D0.5', '4D1.5', '4D2.5', '4D3.5'],
                           ['2D1.5', '2D2.5'],
                           ['2D1.5', '2D2.5'],
                           ['2P0.5', '2P1.5'],
                           ['4S1.5']]  # f3
                    }
        for k, v in testsets.items():
            self.assertEqual(Element(k).term_symbols, v)
    def test_ground_state_term_symbol(self):
        testsets = {"Li": '2S0.5',  # s1
                    "C": '3P0.0',  # p2
                    "O": '3P2.0',  # p4
                    "Ti": '3F2.0',  # d2
                    "Pr": '4I4.5'}  # f3
        for k, v in testsets.items():
            self.assertEqual(Element(k).ground_state_term_symbol, v)
    def test_attributes(self):
        is_true = {("Xe", "Kr"): "is_noble_gas",
                   ("Fe", "Ni"): "is_transition_metal",
                   ("Li", "Cs"): "is_alkali",
                   ("Ca", "Mg"): "is_alkaline",
                   ("F", "Br", "I"): "is_halogen",
                   ("La",): "is_lanthanoid",
                   ("U", "Pu"): "is_actinoid",
                   ("Si", "Ge"): "is_metalloid",
                   ("O", "Te"): "is_chalcogen"}
        for k, v in is_true.items():
            for sym in k:
                self.assertTrue(getattr(Element(sym), v), sym + " is false")
        keys = ["mendeleev_no", "atomic_mass",
                "electronic_structure", "atomic_radius",
                "min_oxidation_state", "max_oxidation_state",
                "electrical_resistivity", "velocity_of_sound", "reflectivity",
                "refractive_index", "poissons_ratio", "molar_volume",
                "thermal_conductivity", "melting_point", "boiling_point",
                "liquid_range", "critical_temperature",
                "superconduction_temperature",
                "bulk_modulus", "youngs_modulus", "brinell_hardness",
                "rigidity_modulus", "mineral_hardness",
                # BUG FIX: a missing comma after "atomic_orbitals" used to
                # concatenate it with the next entry into one bogus key, so
                # neither attribute was ever checked.
                "vickers_hardness", "density_of_solid", "atomic_orbitals",
                "coefficient_of_linear_thermal_expansion", "oxidation_states",
                "common_oxidation_states", "average_ionic_radius",
                "ionic_radii", "long_name"]
        # Test all elements up to Lawrencium (Z = 103).
        for i in range(1, 104):
            el = Element.from_Z(i)
            d = el.data
            for k in keys:
                k_str = k.capitalize().replace("_", " ")
                if k_str in d and (not str(d[k_str]).startswith("no data")):
                    self.assertIsNotNone(getattr(el, k))
                elif k == "long_name":
                    self.assertEqual(getattr(el, "long_name"), d["Name"])
            if len(el.oxidation_states) > 0:
                self.assertEqual(max(el.oxidation_states),
                                 el.max_oxidation_state)
                self.assertEqual(min(el.oxidation_states),
                                 el.min_oxidation_state)
            if el.symbol not in ["He", "Ne", "Ar"]:
                self.assertTrue(el.X > 0, "No electroneg for %s" % el)
        self.assertRaises(ValueError, Element.from_Z, 1000)
    def test_oxidation_states(self):
        el = Element.Fe
        self.assertEqual(el.oxidation_states, (-2, -1, 1, 2, 3, 4, 5, 6))
        self.assertEqual(el.common_oxidation_states, (2, 3))
        self.assertEqual(el.icsd_oxidation_states, (2, 3))
    def test_deepcopy(self):
        el1 = Element.Fe
        el2 = Element.Na
        ellist = [el1, el2]
        self.assertEqual(ellist, deepcopy(ellist),
                         "Deepcopy operation doesn't produce exact copy")
    def test_radii(self):
        el = Element.Pd
        self.assertEqual(el.atomic_radius, 1.40)
        self.assertEqual(el.atomic_radius_calculated, 1.69)
        self.assertEqual(el.van_der_waals_radius, 1.63)
    def test_data(self):
        self.assertEqual(Element.Pd.data["Atomic radius"], 1.4)
        al = Element.Al
        val = al.thermal_conductivity
        self.assertEqual(val, 235)
        self.assertEqual(str(val.unit), "W K^-1 m^-1")
        val = al.electrical_resistivity
        self.assertEqual(val, 2.7e-08)
        self.assertEqual(str(val.unit), "m ohm")
    def test_sort(self):
        # Default sort order is by electronegativity.
        els = [Element.Se, Element.C]
        self.assertEqual(sorted(els), [Element.C, Element.Se])
    def test_pickle(self):
        el1 = Element.Fe
        o = pickle.dumps(el1)
        self.assertEqual(el1, pickle.loads(o))
        # Test all elements up to Uranium
        for i in range(1, 93):
            self.serialize_with_pickle(Element.from_Z(i), test_eq=True)
    def test_print_periodic_table(self):
        Element.print_periodic_table()
class SpecieTestCase(PymatgenTest):
    """Unit tests for pymatgen.core.periodic_table.Specie."""
    def setUp(self):
        self.specie1 = Specie.from_string("Fe2+")
        self.specie2 = Specie("Fe", 3)
        self.specie3 = Specie("Fe", 2)
        self.specie4 = Specie("Fe", 2, {"spin": 5})
    def test_init(self):
        # Only recognized properties (e.g. "spin") are accepted.
        self.assertRaises(ValueError, Specie, "Fe", 2, {"magmom": 5})
    def test_cached(self):
        specie5 = Specie("Fe", 2)
        # self.assertEqual(id(specie5), id(self.specie3))
    def test_ionic_radius(self):
        self.assertEqual(self.specie2.ionic_radius, 78.5 / 100)
        self.assertEqual(self.specie3.ionic_radius, 92 / 100)
        self.assertAlmostEqual(Specie("Mn", 4).ionic_radius, 0.67)
    def test_eq(self):
        self.assertEqual(self.specie1, self.specie3,
                         "Static and actual constructor gives unequal result!")
        self.assertNotEqual(self.specie1, self.specie2,
                            "Fe2+ should not be equal to Fe3+")
        self.assertNotEqual(self.specie4, self.specie3)
        # A Specie never compares equal to a bare Element, in either direction.
        self.assertFalse(self.specie1 == Element("Fe"))
        self.assertFalse(Element("Fe") == self.specie1)
    def test_cmp(self):
        self.assertLess(self.specie1, self.specie2, "Fe2+ should be < Fe3+")
        self.assertLess(Specie("C", 1), Specie("Se", 1))
    def test_attr(self):
        self.assertEqual(self.specie1.Z, 26,
                         "Z attribute for Fe2+ should be = Element Fe.")
        self.assertEqual(self.specie4.spin, 5)
    def test_deepcopy(self):
        el1 = Specie("Fe", 4)
        el2 = Specie("Na", 1)
        ellist = [el1, el2]
        self.assertEqual(ellist, deepcopy(ellist),
                         "Deepcopy operation doesn't produce exact copy.")
    def test_pickle(self):
        self.assertEqual(self.specie1, pickle.loads(pickle.dumps(self.specie1)))
        for i in range(1, 5):
            self.serialize_with_pickle(getattr(self, "specie%d" % i) , test_eq=True)
        cs = Specie("Cs", 1)
        cl = Specie("Cl", 1)
        # NOTE(review): the temp file is not removed in a finally block, so a
        # failed assertion above leaves cscl.pickle behind — consider tempfile.
        with open('cscl.pickle', 'wb') as f:
            pickle.dump((cs, cl), f)
        with open('cscl.pickle', 'rb') as f:
            d = pickle.load(f)
        self.assertEqual(d, (cs, cl))
        import os
        os.remove('cscl.pickle')
    def test_get_crystal_field_spin(self):
        self.assertEqual(Specie("Fe", 2).get_crystal_field_spin(), 4)
        self.assertEqual(Specie("Fe", 3).get_crystal_field_spin(), 5)
        self.assertEqual(Specie("Fe", 4).get_crystal_field_spin(), 4)
        self.assertEqual(Specie("Co", 3).get_crystal_field_spin(
            spin_config="low"), 0)
        self.assertEqual(Specie("Co", 4).get_crystal_field_spin(
            spin_config="low"), 1)
        self.assertEqual(Specie("Ni", 3).get_crystal_field_spin(
            spin_config="low"), 1)
        self.assertEqual(Specie("Ni", 4).get_crystal_field_spin(
            spin_config="low"), 0)
        # Non-transition metals and invalid oxidation states are rejected.
        self.assertRaises(AttributeError,
                          Specie("Li", 1).get_crystal_field_spin)
        self.assertRaises(AttributeError,
                          Specie("Ge", 4).get_crystal_field_spin)
        self.assertRaises(AttributeError,
                          Specie("H", 1).get_crystal_field_spin)
        self.assertRaises(AttributeError,
                          Specie("Fe", 10).get_crystal_field_spin)
        self.assertRaises(ValueError, Specie("Fe", 2).get_crystal_field_spin,
                          "hex")
        s = Specie("Co", 3).get_crystal_field_spin("tet", spin_config="low")
        self.assertEqual(s, 2)
    def test_get_nmr_mom(self):
        self.assertEqual(Specie("H").get_nmr_quadrupole_moment(), 2.860)
        self.assertEqual(Specie("Li").get_nmr_quadrupole_moment(), -0.808)
        self.assertEqual(Specie("Li").get_nmr_quadrupole_moment("Li-7"), -40.1)
        self.assertEqual(Specie("Si").get_nmr_quadrupole_moment(), 0.0)
        # Unknown isotope names are rejected.
        self.assertRaises(ValueError, Specie("Li").get_nmr_quadrupole_moment,
                          "Li-109")
    def test_get_shannon_radius(self):
        self.assertEqual(Specie("Li", 1).get_shannon_radius("IV"), 0.59)
        mn2 = Specie("Mn", 2)
        self.assertEqual(mn2.get_shannon_radius("IV", "High Spin"), 0.66)
        self.assertEqual(mn2.get_shannon_radius("V", "High Spin"), 0.75)
        # Omitting the spin state warns and falls back to a default.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            # Trigger a warning.
            r = mn2.get_shannon_radius("V")
            # Verify some things
            self.assertEqual(len(w), 1)
            self.assertIs(w[-1].category, UserWarning)
            self.assertEqual(r, 0.75)
        self.assertEqual(mn2.get_shannon_radius("VI", "Low Spin"), 0.67)
        self.assertEqual(mn2.get_shannon_radius("VI", "High Spin"), 0.83)
        self.assertEqual(mn2.get_shannon_radius("VII", "High Spin"), 0.9)
        self.assertEqual(mn2.get_shannon_radius("VIII"), 0.96)
    def test_sort(self):
        els = map(get_el_sp, ["N3-", "Si4+", "Si3+"])
        self.assertEqual(sorted(els), [Specie("Si", 3), Specie("Si", 4),
                                       Specie("N", -3)])
    def test_to_from_string(self):
        fe3 = Specie("Fe", 3, {"spin": 5})
        self.assertEqual(str(fe3), "Fe3+,spin=5")
        fe = Specie.from_string("Fe3+,spin=5")
        self.assertEqual(fe.spin, 5)
        mo0 = Specie("Mo", 0, {"spin": 5})
        self.assertEqual(str(mo0), "Mo0+,spin=5")
        mo = Specie.from_string("Mo0+,spin=4")
        self.assertEqual(mo.spin, 4)
    def test_no_oxidation_state(self):
        mo0 = Specie("Mo", None, {"spin": 5})
        self.assertEqual(str(mo0), "Mo,spin=5")
class DummySpecieTestCase(unittest.TestCase):
    """Unit tests for pymatgen.core.periodic_table.DummySpecie."""
    def test_init(self):
        # NOTE(review): these self.specie* attributes are set here but never
        # used by other tests (there is no setUp in this class).
        self.specie1 = DummySpecie("X")
        # Symbols colliding with real element symbols are rejected.
        self.assertRaises(ValueError, DummySpecie, "Xe")
        self.assertRaises(ValueError, DummySpecie, "Xec")
        self.assertRaises(ValueError, DummySpecie, "Vac")
        self.specie2 = DummySpecie("X", 2, {"spin": 3})
        self.assertEqual(self.specie2.spin, 3)
    def test_cached(self):
        sp1 = DummySpecie("X", 2)
        sp2 = DummySpecie("X", 2)
        # self.assertEqual(id(sp1), id(sp2))
    def test_eq(self):
        self.assertFalse(DummySpecie("Xg") == DummySpecie("Xh"))
        self.assertFalse(DummySpecie("Xg") == DummySpecie("Xg", 3))
        self.assertTrue(DummySpecie("Xg", 3) == DummySpecie("Xg", 3))
    def test_from_string(self):
        sp = DummySpecie.from_string("X")
        self.assertEqual(sp.oxi_state, 0)
        sp = DummySpecie.from_string("X2+")
        self.assertEqual(sp.oxi_state, 2)
        sp = DummySpecie.from_string("X2+spin=5")
        self.assertEqual(sp.oxi_state, 2)
        self.assertEqual(sp.spin, 5)
    def test_pickle(self):
        el1 = DummySpecie("X", 3)
        o = pickle.dumps(el1)
        self.assertEqual(el1, pickle.loads(o))
    def test_sort(self):
        r = sorted([Element.Fe, DummySpecie("X")])
        self.assertEqual(r, [DummySpecie("X"), Element.Fe])
        self.assertTrue(DummySpecie("X", 3) < DummySpecie("X", 4))
    def test_safe_from_composition(self):
        # Picks a dummy symbol that does not collide with existing species.
        c = Composition({'Xa': 1, 'Fe': 1})
        self.assertEqual(DummySpecie.safe_from_composition(c).symbol, 'Xb')
        self.assertEqual(DummySpecie.safe_from_composition(c, 1).symbol, 'Xb')
class FuncTest(unittest.TestCase):
    """Unit tests for the get_el_sp helper."""
    def test_get_el_sp(self):
        self.assertEqual(get_el_sp("Fe2+"), Specie("Fe", 2))
        # Numeric strings are interpreted as atomic numbers.
        self.assertEqual(get_el_sp("3"), Element.Li)
        self.assertEqual(get_el_sp("3.0"), Element.Li)
        self.assertEqual(get_el_sp("U"), Element.U)
        self.assertEqual(get_el_sp("X2+"), DummySpecie("X", 2))
        self.assertEqual(get_el_sp("Mn3+"), Specie("Mn", 3))
        # Lists are converted element-wise.
        self.assertEqual(get_el_sp(["Li+", "Mn3+"]),
                         [Specie("Li", 1), Specie("Mn", 3)])
if __name__ == "__main__":
    # Run the full test suite when executed as a script.
    unittest.main()
| nisse3000/pymatgen | pymatgen/core/tests/test_periodic_table.py | Python | mit | 16,189 | [
"pymatgen"
] | d46c90779f98ac7a426b554ef47c9a93acfcd4770c0f73ec5fc05f0c908bc39a |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from datetime import datetime
# Script: scan a MOOSE-style 'tests' spec, map each test input file to its
# qualified test name, mine `git log` for the authorship history of each input
# file, and splice a <TestInfo> XML block into inputs that lack one.
testDict = {}
testFile = open('tests')
prefix = 'framework.'
#/Users/alfoa/projects/raven/tests/framework/ErrorChecks
# Map from commit e-mail local-parts (lower-cased) to repository user names.
authors = {
  'aaron.epiney': 'epinas',
  'andrea.alfonsi': 'alfoa',
  'bandini.alessandro': 'banda',
  'bounces': 'moosebuild',
  'congjian.wang': 'wangc',
  'cristian.rabiti': 'crisr',
  'diego.mandelli': 'mandd',
  'carlo.parisi': '',
  'charles.jouglard': '',
  'jun.chen': 'chenj',
  'jongsuk.kim': 'kimj',
  'joshua.cogliati': 'cogljj',
  'jjc': 'cogljj',
  'kingfive': 'rinai',
  'ivan.rinaldi': 'rinai',
  'maljdp': 'maljdan',
  'maljovec': 'maljdan',
  'michael.pietrykowski': '',
  'robert.kinoshita': 'bobk',
  'scott.schoen': '',
  'sujong.yoon': '',
  'ramazan.sen': 'senrs',
  'sonat.sen': 'senrs',
  'paul.talbot': 'talbpaul',
  'talbotpne': 'talbpaul',
  'taoyiliang': 'talbpaul',
  'andrew.slaughter': 'slauae',
  'benjamin.spencer': 'spenbw',
  'brian.alger': 'algebk',
  'cody.permann': 'permcj',
  'codypermann': 'permcj',
  'david.andrs': 'andrsd',
  'haihua.zhao': 'zhaoh',
  'jason.hales': 'halejd',
  'jason.miller': 'milljm',
  'joseph.nielsen': 'nieljw'
  # cohn.72
  # derek.gaston:
  # jw.peterson
}
# Parse the 'tests' spec: each "[./name]" section maps its "input = ..." file
# to the qualified test name "framework.name".
while True:
  line = testFile.readline()
  if not line:
    break
  line = line.strip()
  if line.startswith('[./'):
    token = line.replace('[./','').replace(']','')
    nextLine = testFile.readline().strip()
    # Skip forward to the section's "input" line.
    while not nextLine.startswith('input'):
      nextLine = testFile.readline()
      if not nextLine:
        break
      nextLine = nextLine.strip()
    if not nextLine:
      break
    inputFile = nextLine.split(' = ')[1].replace('\'', '')
    testDict[inputFile] = prefix + token
testFile.close()
for key,value in testDict.items():
  print(key,value)
  tokens = key.rsplit('.',1)[0].split('_')
  if 'smooth' in tokens:
    smooth = True
    kernel = tokens[2]
  else:
    smooth = False
    # NOTE(review): bare except hides IndexError; prefer checking len(tokens).
    try: kernel = tokens[1]
    except: kernel = "None"
  # Mine the file's commit history from git.
  text = os.popen('git log %s' % key).read()
  revisions = []
  lines = text.split('\n')
  i = 0
  while i < len(lines):
    line = lines[i]
    if line.startswith('Author:'):
      # Extract the e-mail local-part and translate it to a user name.
      author = line.rsplit('<',1)[1].rsplit('@',1)[0].lower()
      if author in authors:
        author = authors[author]
      i += 1
      line = lines[i]
      # Date line: strip the "Date:" label and the trailing timezone offset.
      date = line.split(':',1)[1][:-6].strip()
      date = datetime.strptime(date, '%a %b %d %H:%M:%S %Y').strftime('%Y-%m-%d')
      i += 2
      line = lines[i]
      # Accumulate the commit message until the next "commit" header.
      description = ''
      while not line.startswith('commit'):
        description += line
        i += 1
        if i >= len(lines):
          break
        line = lines[i]
      revisions.append((author,date,description))
    i += 1
  # The oldest commit (last in git-log order) becomes the "created" entry.
  # NOTE(review): if the file has no history and no earlier iteration set
  # these names, the later use of `author`/`date` raises NameError.
  try: author,date,description = revisions.pop()
  except: pass
  revisions.reverse()
  # NOTE(review): reads key.split()[0] but writes to `key` below — if the
  # "input" value contained several tokens these are different files.
  inputFile = open(key.split()[0],'r')
  lines = [line for line in inputFile]
  inputFile.close()
  # Skip files that already carry a <TestInfo> block.
  foundTestDescription = False
  for line in lines:
    if 'TestInfo' in line:
      foundTestDescription = True
      break
  if not foundTestDescription:
    # Insert right after the line mentioning "Simulation".
    for i,line in enumerate(lines):
      if 'Simulation' in line:
        break
    newLines = []
    if key.split()[0].strip().endswith("py"): newLines.append('  """\n')
    newLines.append('  <TestInfo>\n')
    newLines.append('    <name>%s</name>\n' % value)
    newLines.append('    <author>%s</author>\n' % author)
    newLines.append('    <created>%s</created>\n' % date)
    newLines.append('    <classesTested> </classesTested>\n')
    newLines.append('    <description>\n')
    newLines.append('       This test is aimed to check .\n')
    newLines.append('    </description>\n')
    newLines.append('    <revisions>\n')
    if len(revisions)>0:
      for author,date,description in revisions:
        newLines.append('      <revision author="%s" date="%s">%s</revision>\n' % (author,date,description.strip()))
    newLines.append('      <revision author="alfoa" date="2017-01-21">Adding this test description.</revision>\n')
    newLines.append('    </revisions>\n')
    newLines.append('  </TestInfo>\n')
    if key.split()[0].strip().endswith("py"): newLines.append('  """\n')
    lines = lines[:(i+1)] + newLines + lines[(i+1):]
  inputFile = open(key,'w')
  inputFile.write(''.join(lines))
  inputFile.close()
| joshua-cogliati-inl/raven | developer_tools/addTestDescriptions.py | Python | apache-2.0 | 5,073 | [
"Brian"
] | 8bd62ad2704cfd89186f4afd533c64d28e02ac97501690934438dcd92d15ec98 |
"""Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy
from numpy import (atleast_1d, poly, polyval, roots, real, asarray, allclose,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array)
from numpy import mintypecode
import numpy as np
from scipy import special, optimize
from scipy.special import comb
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'filter_dict', 'band_dict', 'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay']
class BadCoefficients(UserWarning):
    """Warning about badly conditioned filter coefficients.

    Emitted (e.g. by `normalize`) when near-zero leading numerator
    coefficients are detected and stripped, since the resulting filter
    may be meaningless.
    """
    pass
# Shadow the builtin ``abs`` with numpy's element-wise ``absolute`` for the
# rest of this module (for ndarrays the builtin dispatches to the same
# ``__abs__``; the alias makes the array-oriented intent explicit).
abs = absolute
def findfreqs(num, den, N):
    """
    Find an array of frequencies for computing the response of a filter.

    Parameters
    ----------
    num, den : array_like, 1-D
        Numerator and denominator polynomial coefficients of the filter's
        transfer function, ordered from highest to lowest degree.
    N : int
        The number of frequencies to return.

    Returns
    -------
    w : (N,) ndarray
        Logarithmically spaced frequencies spanning the "interesting"
        region of the response, chosen from the pole/zero locations.

    Examples
    --------
    >>> findfreqs([1, 0], [1, 8, 25], N=9)
    array([  1.00000000e-02,   3.16227766e-02,   1.00000000e-01,
             3.16227766e-01,   1.00000000e+00,   3.16227766e+00,
             1.00000000e+01,   3.16227766e+01,   1.00000000e+02])
    """
    poles = atleast_1d(roots(den)) + 0j
    zs = atleast_1d(roots(num)) + 0j
    if len(poles) == 0:
        # No poles: fall back on an arbitrary far-away feature point.
        poles = atleast_1d(-1000) + 0j

    # Collect upper-half-plane poles and (finite) upper-half-plane zeros.
    features = numpy.concatenate((
        poles[poles.imag >= 0],
        zs[(abs(zs) < 1e5) & (zs.imag >= 0)]))

    # Features at (or numerically at) the origin are bumped to magnitude 1
    # so their logarithm is defined.
    at_origin = abs(features) < 1e-10

    upper = numpy.around(
        numpy.log10(numpy.max(3 * abs(features.real + at_origin) +
                              1.5 * features.imag)) + 0.5)
    lower = numpy.around(
        numpy.log10(0.1 * numpy.min(abs(real(features + at_origin)) +
                                    2 * features.imag)) - 0.5)
    return logspace(lower, upper, N)
def freqs(b, a, worN=None, plot=None):
    """
    Compute frequency response of analog filter.

    Evaluates ``H(w) = polyval(b, jw) / polyval(a, jw)`` on a grid of
    angular frequencies.

    Parameters
    ----------
    b : ndarray
        Numerator of a linear filter.
    a : ndarray
        Denominator of a linear filter.
    worN : {None, int}, optional
        If None, evaluate at 200 frequencies chosen around the interesting
        parts of the response (from the pole-zero locations).  If a single
        integer, evaluate at that many chosen frequencies.  Otherwise,
        evaluate at the angular frequencies (e.g. rad/s) given in `worN`.
    plot : callable, optional
        If given, called as ``plot(w, h)`` before returning; handy for
        plotting the response inline.  Note that passing Matplotlib's
        ``plot`` directly shows only the real part; prefer
        ``lambda w, h: plot(w, abs(h))``.

    Returns
    -------
    w : ndarray
        The angular frequencies at which `h` was computed.
    h : ndarray
        The complex frequency response.

    See Also
    --------
    freqz : Compute the frequency response of a digital filter.
    """
    if worN is None:
        w = findfreqs(b, a, 200)
    elif isinstance(worN, int):
        w = findfreqs(b, a, worN)
    else:
        w = worN
    w = atleast_1d(w)

    jw = 1j * w
    h = polyval(b, jw) / polyval(a, jw)
    if plot is not None:
        plot(w, h)
    return w, h
def freqz(b, a=1, worN=None, whole=0, plot=None):
    """
    Compute the frequency response of a digital filter.

    Evaluates ``H(e^jw) = B(e^jw) / A(e^jw)`` by evaluating both
    polynomials (in negative powers of ``z``) at ``z = e^jw``.

    Parameters
    ----------
    b : ndarray
        Numerator of a linear filter.
    a : ndarray
        Denominator of a linear filter.
    worN : {None, int, array_like}, optional
        If None (default), evaluate at 512 frequencies equally spaced
        around the unit circle.  If a single integer, use that many
        equally spaced frequencies.  If array_like, evaluate at the
        given frequencies (radians/sample).
    whole : bool, optional
        By default frequencies run from 0 to the Nyquist frequency, pi
        radians/sample (upper half of the unit circle).  If `whole` is
        True, they run from 0 to 2*pi radians/sample.
    plot : callable
        If given, called as ``plot(w, h)`` before returning.  Note that
        Matplotlib's ``plot`` shows only the real part; prefer
        ``lambda w, h: plot(w, abs(h))``.

    Returns
    -------
    w : ndarray
        The normalized frequencies at which `h` was computed,
        in radians/sample.
    h : ndarray
        The complex frequency response.
    """
    b, a = map(atleast_1d, (b, a))
    lastpoint = 2 * pi if whole else pi
    if worN is None:
        w = numpy.linspace(0, lastpoint, 512, endpoint=False)
    elif isinstance(worN, int):
        w = numpy.linspace(0, lastpoint, worN, endpoint=False)
    else:
        w = worN
    w = atleast_1d(w)

    # Evaluate in z**-1 by reversing the coefficient order for polyval.
    zm1 = exp(-1j * w)
    h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
    if plot is not None:
        plot(w, h)
    return w, h
def group_delay(system, w=None, whole=False):
    r"""Compute the group delay of a digital filter.

    The group delay measures by how many samples amplitude envelopes of
    various spectral components of a signal are delayed by a filter.
    It is formally defined as the derivative of continuous (unwrapped) phase::

               d        jw
     D(w) = - -- arg H(e)
              dw

    Parameters
    ----------
    system : tuple of array_like (b, a)
        Numerator and denominator coefficients of a filter transfer function.
    w : {None, int, array-like}, optional
        If None (default), then compute at 512 frequencies equally spaced
        around the unit circle.
        If a single integer, then compute at that many frequencies.
        If array, compute the delay at the frequencies given
        (in radians/sample).
    whole : bool, optional
        Normally, frequencies are computed from 0 to the Nyquist frequency,
        pi radians/sample (upper-half of unit-circle). If `whole` is True,
        compute frequencies from 0 to ``2*pi`` radians/sample.

    Returns
    -------
    w : ndarray
        The normalized frequencies at which the group delay was computed,
        in radians/sample.
    gd : ndarray
        The group delay.

    Notes
    -----
    The similar function in MATLAB is called `grpdelay`.

    If the transfer function :math:`H(z)` has zeros or poles on the unit
    circle, the group delay at corresponding frequencies is undefined.
    When such a case arises the warning is raised and the group delay
    is set to 0 at those frequencies.

    For the details of numerical computation of the group delay refer to [1]_.

    .. versionadded: 0.16.0

    See Also
    --------
    freqz : Frequency response of a digital filter

    References
    ----------
    .. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
           3rd edition", p. 830.
    """
    if w is None:
        w = 512

    if isinstance(w, int):
        if whole:
            w = np.linspace(0, 2 * pi, w, endpoint=False)
        else:
            w = np.linspace(0, pi, w, endpoint=False)

    w = np.atleast_1d(w)
    b, a = map(np.atleast_1d, system)
    # Numerical method from [1]: with c = conv(b, reversed(a)), the group
    # delay is Re{C'(z)/C(z)} on the unit circle, offset by the order of `a`.
    c = np.convolve(b, a[::-1])
    cr = c * np.arange(c.size)
    z = np.exp(-1j * w)
    num = np.polyval(cr[::-1], z)
    den = np.polyval(c[::-1], z)

    # Bug fix: this line previously read ``10 * EPSILON``, but no module
    # global ``EPSILON`` is defined anywhere in this file, so every call
    # raised NameError.  Use double-precision machine epsilon explicitly.
    eps = np.finfo(np.float64).eps
    singular = np.absolute(den) < 10 * eps
    if np.any(singular):
        warnings.warn(
            "The group delay is singular at frequencies [{0}], setting to 0".
            format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
        )

    gd = np.zeros_like(w)
    # Undefined (singular) frequencies are left at 0 by construction.
    gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
    return w, gd
def _cplxreal(z, tol=None):
    """
    Split into complex and real parts, combining conjugate pairs.

    The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
    elements.  Every complex element must be part of a complex-conjugate pair,
    which are combined into a single number (with positive imaginary part) in
    the output.  Two complex numbers are considered a conjugate pair if their
    real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.

    Parameters
    ----------
    z : array_like
        Vector of complex numbers to be sorted and split
    tol : float, optional
        Relative tolerance for testing realness and conjugate equality.
        Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
        float64)

    Returns
    -------
    zc : ndarray
        Complex elements of `z`, with each pair represented by a single value
        having positive imaginary part, sorted first by real part, and then
        by magnitude of imaginary part.  The pairs are averaged when combined
        to reduce error.
    zr : ndarray
        Real elements of `z` (those having imaginary part less than
        `tol` times their magnitude), sorted by value.

    Raises
    ------
    ValueError
        If there are any complex numbers in `z` for which a conjugate
        cannot be found.

    See Also
    --------
    _cplxpair

    Examples
    --------
    >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
    >>> zc, zr = _cplxreal(a)
    >>> print(zc)
    [ 1.+1.j  2.+1.j  2.+1.j  2.+2.j]
    >>> print(zr)
    [ 1.  3.  4.]
    """
    z = atleast_1d(z)
    if z.size == 0:
        return z, z
    elif z.ndim != 1:
        raise ValueError('_cplxreal only accepts 1D input')

    if tol is None:
        # Get tolerance from dtype of input
        tol = 100 * np.finfo((1.0 * z).dtype).eps

    # Sort by real part, magnitude of imaginary part (speed up further sorting)
    z = z[np.lexsort((abs(z.imag), z.real))]

    # Split reals from conjugate pairs
    real_indices = abs(z.imag) <= tol * abs(z)
    zr = z[real_indices].real

    if len(zr) == len(z):
        # Input is entirely real
        return array([]), zr

    # Split positive and negative halves of conjugates
    z = z[~real_indices]
    zp = z[z.imag > 0]
    zn = z[z.imag < 0]

    if len(zp) != len(zn):
        raise ValueError('Array contains complex value with no matching '
                         'conjugate.')

    # Find runs of (approximately) the same real part
    same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
    diffs = numpy.diff(concatenate(([0], same_real, [0])))
    run_starts = numpy.where(diffs > 0)[0]
    run_stops = numpy.where(diffs < 0)[0]

    # Sort each run by their imaginary parts, in place, so that the k-th
    # positive-half element lines up with the k-th negative-half element.
    for i in range(len(run_starts)):
        start = run_starts[i]
        stop = run_stops[i] + 1
        for chunk in (zp[start:stop], zn[start:stop]):
            chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]

    # Check that negatives match positives
    if any(abs(zp - zn.conj()) > tol * abs(zn)):
        raise ValueError('Array contains complex value with no matching '
                         'conjugate.')

    # Average out numerical inaccuracy in real vs imag parts of pairs
    zc = (zp + zn.conj()) / 2
    return zc, zr
def _cplxpair(z, tol=None):
    """
    Sort `z` into complex-conjugate pairs followed by purely real values.

    Pairs are ordered by increasing real part; within each pair the member
    with negative imaginary part comes first.  Pairs sharing a real part are
    ordered by increasing imaginary magnitude.  Two values are considered a
    conjugate pair when their real and imaginary parts differ in magnitude
    by less than ``tol * abs(z)`` (default ``100 * spacing(1)`` of the
    dtype); matched pairs are forced to be exact conjugates by averaging.
    A value is treated as real when its imaginary part is smaller than
    `tol` times its magnitude.

    Raises ``ValueError`` when a complex value has no conjugate partner.

    See Also
    --------
    _cplxreal
    """
    z = atleast_1d(z)
    if z.size == 0 or np.isrealobj(z):
        return np.sort(z)

    if z.ndim != 1:
        raise ValueError('z must be 1-dimensional')

    zc, zr = _cplxreal(z, tol)

    # _cplxreal hands back one member of each pair (positive imaginary
    # part); interleave each conjugate before its partner, then append
    # the real values at the end.
    interleaved = np.dstack((zc.conj(), zc)).flatten()
    return np.append(interleaved, zr)
def tf2zpk(b, a):
    r"""Return zero, pole, gain (z, p, k) representation from a numerator,
    denominator representation of a linear filter.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients, highest power first.
    a : array_like
        Denominator polynomial coefficients, highest power first.

    Returns
    -------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.

    Notes
    -----
    Near-zero leading numerator coefficients are stripped by `normalize`,
    which emits a `BadCoefficients` warning when that happens.

    Coefficients are interpreted as positive, descending powers of the
    transfer-function variable: ``b = [b_0, ..., b_M]`` represents
    :math:`b_0 s^M + \cdots + b_M` (and likewise for `a`).  This is the
    "positive powers" form common in controls engineering; it coincides
    with the "negative powers" DSP form only when the numerator and
    denominator orders are equal (as with filters from the bilinear
    transform).  Otherwise convert to positive-powers form first.
    """
    b, a = normalize(b, a)
    leading = a[0]
    b = (b + 0.0) / leading
    a = (a + 0.0) / leading
    k = b[0]
    z = roots(b / k)
    p = roots(a)
    return z, p, k
def _real_if_conjugate_roots(coeffs, rts):
    """Return `coeffs` with imaginary parts dropped when `rts` consists only
    of real values and complex-conjugate pairs (so the polynomial is
    mathematically real).  Copied from numpy.poly, since we can't depend on
    a specific version of numpy."""
    if not issubclass(coeffs.dtype.type, numpy.complexfloating):
        return coeffs
    rts = numpy.asarray(rts, complex)
    pos_roots = numpy.compress(rts.imag > 0, rts)
    neg_roots = numpy.conjugate(numpy.compress(rts.imag < 0, rts))
    if len(pos_roots) == len(neg_roots):
        if numpy.all(numpy.sort_complex(neg_roots) ==
                     numpy.sort_complex(pos_roots)):
            return coeffs.real.copy()
    return coeffs


def zpk2tf(z, p, k):
    """
    Return polynomial transfer function representation from zeros and poles

    Parameters
    ----------
    z : array_like
        Zeros of the transfer function.  May also be a 2-D array: one set
        of zeros per row, producing one numerator row per set.
    p : array_like
        Poles of the transfer function.
    k : float
        System gain (one gain per row when `z` is 2-D).

    Returns
    -------
    b : ndarray
        Numerator polynomial coefficients.
    a : ndarray
        Denominator polynomial coefficients.
    """
    z = atleast_1d(z)
    k = atleast_1d(k)
    if len(z.shape) > 1:
        # One polynomial per row of zeros, each scaled by its gain.
        temp = poly(z[0])
        b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
        if len(k) == 1:
            k = [k[0]] * z.shape[0]
        for i in range(z.shape[0]):
            b[i] = k[i] * poly(z[i])
    else:
        b = k * poly(z)
    a = atleast_1d(poly(p))

    # Use real output if possible.  (This check was previously duplicated
    # verbatim for `b` and `a`; it is now factored into a shared helper.)
    b = _real_if_conjugate_roots(b, z)
    a = _real_if_conjugate_roots(a, p)

    return b, a
def tf2sos(b, a, pairing='nearest'):
    """
    Return second-order sections from transfer function representation

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    pairing : {'nearest', 'keep_odd'}, optional
        How to combine pairs of poles and zeros into sections; see
        `zpk2sos`.

    Returns
    -------
    sos : ndarray
        Second-order filter coefficients, shape ``(n_sections, 6)``.
        See `sosfilt` for the SOS format specification.

    See Also
    --------
    zpk2sos, sosfilt

    Notes
    -----
    Converting TF to SOS usually does not improve numerical precision;
    prefer designing in ZPK form and converting directly to SOS.  This
    routine simply goes TF -> ZPK -> SOS.

    .. versionadded:: 0.16.0
    """
    z, p, k = tf2zpk(b, a)
    return zpk2sos(z, p, k, pairing=pairing)
def sos2tf(sos):
    """
    Return a single transfer function from a series of second-order sections

    Parameters
    ----------
    sos : array_like
        Second-order filter coefficients, shape ``(n_sections, 6)``.
        See `sosfilt` for the SOS format specification.

    Returns
    -------
    b : ndarray
        Numerator polynomial coefficients.
    a : ndarray
        Denominator polynomial coefficients.

    Notes
    -----
    .. versionadded:: 0.16.0
    """
    sos = np.asarray(sos)
    # Multiply the section polynomials together: each row holds the
    # numerator in columns 0-2 and the denominator in columns 3-5.
    b, a = [1.], [1.]
    for row in sos:
        b = np.polymul(b, row[:3])
        a = np.polymul(a, row[3:])
    return b, a
def sos2zpk(sos):
    """
    Return zeros, poles, and gain of a series of second-order sections

    Parameters
    ----------
    sos : array_like
        Second-order filter coefficients, shape ``(n_sections, 6)``.
        See `sosfilt` for the SOS format specification.

    Returns
    -------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.

    Notes
    -----
    .. versionadded:: 0.16.0
    """
    sos = np.asarray(sos)
    n_sections = sos.shape[0]
    # Each biquad contributes exactly two zeros and two poles; the overall
    # gain is the product of the per-section gains.
    z = np.empty(2 * n_sections, np.complex128)
    p = np.empty(2 * n_sections, np.complex128)
    k = 1.
    for idx in range(n_sections):
        zsec, psec, ksec = tf2zpk(sos[idx, :3], sos[idx, 3:])
        z[2 * idx:2 * idx + 2] = zsec
        p[2 * idx:2 * idx + 2] = psec
        k *= ksec
    return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
    """
    Return second-order sections from zeros, poles, and gain of a system

    Parameters
    ----------
    z : array_like
        Zeros of the transfer function.
    p : array_like
        Poles of the transfer function.
    k : float
        System gain.
    pairing : {'nearest', 'keep_odd'}, optional
        The method to use to combine pairs of poles and zeros into sections.
        See Notes below.

    Returns
    -------
    sos : ndarray
        Array of second-order filter coefficients, with shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    See Also
    --------
    sosfilt

    Notes
    -----
    The algorithm used to convert ZPK to SOS format is designed to
    minimize errors due to numerical precision issues. The pairing
    algorithm attempts to minimize the peak gain of each biquadratic
    section. This is done by pairing poles with the nearest zeros, starting
    with the poles closest to the unit circle.

    *Algorithms*

    The current algorithms are designed specifically for use with digital
    filters. Although they can operate on analog filters, the results may
    be sub-optimal.

    The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
    algorithms are mostly shared. The ``nearest`` algorithm attempts to
    minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
    the constraint that odd-order systems should retain one section
    as first order. The algorithm steps and are as follows:

    As a pre-processing step, add poles or zeros to the origin as
    necessary to obtain the same number of poles and zeros for pairing.
    If ``pairing == 'nearest'`` and there are an odd number of poles,
    add an additional pole and a zero at the origin.

    The following steps are then iterated over until no more poles or
    zeros remain:

    1. Take the (next remaining) pole (complex or real) closest to the
       unit circle to begin a new filter section.

    2. If the pole is real and there are no other remaining real poles [#]_,
       add the closest real zero to the section and leave it as a first
       order section. Note that after this step we are guaranteed to be
       left with an even number of real poles, complex poles, real zeros,
       and complex zeros for subsequent pairing iterations.

    3. Else:

        1. If the pole is complex and the zero is the only remaining real
           zero*, then pair the pole with the *next* closest zero
           (guaranteed to be complex). This is necessary to ensure that
           there will be a real zero remaining to eventually create a
           first-order section (thus keeping the odd order).

        2. Else pair the pole with the closest remaining zero (complex or
           real).

        3. Proceed to complete the second-order section by adding another
           pole and zero to the current pole and zero in the section:

            1. If the current pole and zero are both complex, add their
               conjugates.

            2. Else if the pole is complex and the zero is real, add the
               conjugate pole and the next closest real zero.

            3. Else if the pole is real and the zero is complex, add the
               conjugate zero and the real pole closest to those zeros.

            4. Else (we must have a real pole and real zero) add the next
               real pole closest to the unit circle, and then add the real
               zero closest to that pole.

    .. [#] This conditional can only be met for specific odd-order inputs
           with the ``pairing == 'keep_odd'`` method.

    .. versionadded:: 0.16.0

    Examples
    --------

    Design a 6th order low-pass elliptic digital filter for a system with a
    sampling rate of 8000 Hz that has a pass-band corner frequency of
    1000 Hz.  The ripple in the pass-band should not exceed 0.087 dB, and
    the attenuation in the stop-band should be at least 90 dB.

    In the following call to `signal.ellip`, we could use ``output='sos'``,
    but for this example, we'll use ``output='zpk'``, and then convert to SOS
    format with `zpk2sos`:

    >>> from scipy import signal
    >>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')

    Now convert to SOS format.

    >>> sos = signal.zpk2sos(z, p, k)

    The coefficents of the numerators of the sections:

    >>> sos[:, :3]
    array([[ 0.0014154 ,  0.00248707,  0.0014154 ],
           [ 1.        ,  0.72965193,  1.        ],
           [ 1.        ,  0.17594966,  1.        ]])

    The symmetry in the coefficients occurs because all the zeros are on the
    unit circle.

    The coefficients of the denominators of the sections:

    >>> sos[:, 3:]
    array([[ 1.        , -1.32543251,  0.46989499],
           [ 1.        , -1.26117915,  0.6262586 ],
           [ 1.        , -1.25707217,  0.86199667]])

    The next example shows the effect of the `pairing` option.  We have a
    system with three poles and three zeros, so the SOS array will have
    shape (2, 6). The means there is, in effect, an extra pole and an extra
    zero at the origin in the SOS representation.

    >>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
    >>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])

    With ``pairing='nearest'`` (the default), we obtain

    >>> signal.zpk2sos(z1, p1, 1)
    array([[ 1.  ,  1.  ,  0.5 ,  1.  , -0.75,  0.  ],
           [ 1.  ,  1.  ,  0.  ,  1.  , -1.6 ,  0.65]])

    The first section has the zeros {-0.5-0.05j, -0.5+0.5j} and the poles
    {0, 0.75}, and the second section has the zeros {-1, 0} and poles
    {0.8+0.1j, 0.8-0.1j}.  Note that the extra pole and zero at the origin
    have been assigned to different sections.

    With ``pairing='keep_odd'``, we obtain:

    >>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
    array([[ 1.  ,  1.  ,  0.  ,  1.  , -0.75,  0.  ],
           [ 1.  ,  1.  ,  0.5 ,  1.  , -1.6 ,  0.65]])

    The extra pole and zero at the origin are in the same section.
    The first section is, in effect, a first-order section.

    """
    # TODO in the near future:
    # 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
    # 2. Make `decimate` use `sosfilt` instead of `lfilter`.
    # 3. Make sosfilt automatically simplify sections to first order
    #    when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
    # 4. Further optimizations of the section ordering / pole-zero pairing.
    # See the wiki for other potential issues.

    valid_pairings = ['nearest', 'keep_odd']
    if pairing not in valid_pairings:
        raise ValueError('pairing must be one of %s, not %s'
                         % (valid_pairings, pairing))
    if len(z) == len(p) == 0:
        # Pure gain: a single trivial section.
        return array([[k, 0., 0., 1., 0., 0.]])

    # ensure we have the same number of poles and zeros, and make copies
    p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
    z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
    n_sections = (max(len(p), len(z)) + 1) // 2
    sos = zeros((n_sections, 6))

    if len(p) % 2 == 1 and pairing == 'nearest':
        p = np.concatenate((p, [0.]))
        z = np.concatenate((z, [0.]))
    assert len(p) == len(z)

    # Ensure we have complex conjugate pairs
    # (note that _cplxreal only gives us one element of each complex pair):
    z = np.concatenate(_cplxreal(z))
    p = np.concatenate(_cplxreal(p))

    p_sos = np.zeros((n_sections, 2), np.complex128)
    z_sos = np.zeros_like(p_sos)
    # Build sections one at a time, always starting from the pole nearest
    # the unit circle (see the algorithm steps in the docstring above).
    for si in range(n_sections):
        # Select the next "worst" pole
        p1_idx = np.argmin(np.abs(1 - np.abs(p)))
        p1 = p[p1_idx]
        p = np.delete(p, p1_idx)

        # Pair that pole with a zero

        if np.isreal(p1) and np.isreal(p).sum() == 0:
            # Special case to set a first-order section
            z1_idx = _nearest_real_complex_idx(z, p1, 'real')
            z1 = z[z1_idx]
            z = np.delete(z, z1_idx)
            p2 = z2 = 0
        else:
            if not np.isreal(p1) and np.isreal(z).sum() == 1:
                # Special case to ensure we choose a complex zero to pair
                # with so later (setting up a first-order section)
                z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
                assert not np.isreal(z[z1_idx])
            else:
                # Pair the pole with the closest zero (real or complex)
                z1_idx = np.argmin(np.abs(p1 - z))
            z1 = z[z1_idx]
            z = np.delete(z, z1_idx)

            # Now that we have p1 and z1, figure out what p2 and z2 need to be
            if not np.isreal(p1):
                if not np.isreal(z1):  # complex pole, complex zero
                    p2 = p1.conj()
                    z2 = z1.conj()
                else:  # complex pole, real zero
                    p2 = p1.conj()
                    z2_idx = _nearest_real_complex_idx(z, p1, 'real')
                    z2 = z[z2_idx]
                    assert np.isreal(z2)
                    z = np.delete(z, z2_idx)
            else:
                if not np.isreal(z1):  # real pole, complex zero
                    z2 = z1.conj()
                    p2_idx = _nearest_real_complex_idx(p, z1, 'real')
                    p2 = p[p2_idx]
                    assert np.isreal(p2)
                else:  # real pole, real zero
                    # pick the next "worst" pole to use
                    idx = np.where(np.isreal(p))[0]
                    assert len(idx) > 0
                    p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
                    p2 = p[p2_idx]

                    # find a real zero to match the added pole
                    assert np.isreal(p2)
                    z2_idx = _nearest_real_complex_idx(z, p2, 'real')
                    z2 = z[z2_idx]
                    assert np.isreal(z2)
                    z = np.delete(z, z2_idx)
                p = np.delete(p, p2_idx)
        p_sos[si] = [p1, p2]
        z_sos[si] = [z1, z2]
    assert len(p) == len(z) == 0  # we've consumed all poles and zeros
    del p, z

    # Construct the system, reversing order so the "worst" are last
    p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
    z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
    gains = np.ones(n_sections)
    gains[0] = k
    for si in range(n_sections):
        x = zpk2tf(z_sos[si], p_sos[si], gains[si])
        sos[si] = np.concatenate(x)
    return sos
def normalize(b, a):
    """Normalize polynomial representation of a transfer function.

    Divides both polynomials by the leading denominator coefficient so the
    denominator is monic.  Near-zero leading numerator coefficients are
    stripped; when that happens a `BadCoefficients` warning is emitted.
    """
    b, a = map(atleast_1d, (b, a))
    if a.ndim != 1:
        raise ValueError("Denominator polynomial must be rank-1 array.")
    if b.ndim > 2:
        raise ValueError("Numerator polynomial must be rank-1 or"
                         " rank-2 array.")
    if b.ndim == 1:
        # Work on a 2-D numerator internally; flattened again on the way out.
        b = asarray([b], b.dtype.char)

    # Drop exactly-zero leading denominator coefficients, keeping at least
    # one so the division below is defined for non-degenerate input.
    while a[0] == 0.0 and len(a) > 1:
        a = a[1:]

    num = b * 1.0 / a[0]
    den = a * 1.0 / a[0]

    if allclose(0, num[:, 0], atol=1e-14):
        warnings.warn("Badly conditioned filter coefficients (numerator): the "
                      "results may be meaningless", BadCoefficients)
        while allclose(0, num[:, 0], atol=1e-14) and (num.shape[-1] > 1):
            num = num[:, 1:]

    if num.shape[0] == 1:
        num = num[0]
    return num, den
def lp2lp(b, a, wo=1.0):
    """
    Transform a lowpass filter prototype to a different frequency.

    Return an analog low-pass filter with cutoff frequency `wo`
    from an analog low-pass filter prototype with unity cutoff frequency, in
    transfer function ('ba') representation.
    """
    a, b = map(atleast_1d, (a, b))
    try:
        wo = float(wo)
    except TypeError:
        # Allow a length-1 sequence for the cutoff.
        wo = float(wo[0])
    na, nb = len(a), len(b)
    degree = max(na, nb)
    # powers[i] == wo ** (degree - 1 - i), descending
    powers = wo ** numpy.arange(degree - 1, -1, -1)
    shift_b = max(nb - na, 0)
    shift_a = max(na - nb, 0)
    num = b * powers[shift_b] / powers[shift_a:]
    den = a * powers[shift_b] / powers[shift_b:]
    return normalize(num, den)
def lp2hp(b, a, wo=1.0):
    """
    Transform a lowpass filter prototype to a highpass filter.

    Return an analog high-pass filter with cutoff frequency `wo`
    from an analog low-pass filter prototype with unity cutoff frequency, in
    transfer function ('ba') representation.
    """
    a, b = map(atleast_1d, (a, b))
    try:
        wo = float(wo)
    except TypeError:
        # Allow a length-1 sequence for the cutoff.
        wo = float(wo[0])
    na, nb = len(a), len(b)
    size = max(na, nb)
    if wo != 1:
        powers = wo ** numpy.arange(size)
    else:
        powers = numpy.ones(size, b.dtype.char)
    # The s -> wo/s substitution reverses the coefficient order; the shorter
    # polynomial is zero-padded up to the length of the longer one.
    if na >= nb:
        den = a[::-1] * powers
        num = resize(b, (na,))
        num[nb:] = 0.0
        num[:nb] = b[::-1] * powers[:nb]
    else:
        num = b[::-1] * powers
        den = resize(a, (nb,))
        den[na:] = 0.0
        den[:na] = a[::-1] * powers[:na]
    return normalize(num, den)
def lp2bp(b, a, wo=1.0, bw=1.0):
    """
    Transform a lowpass filter prototype to a bandpass filter.

    Return an analog band-pass filter with center frequency `wo` and
    bandwidth `bw` from an analog low-pass filter prototype with unity
    cutoff frequency, in transfer function ('ba') representation.

    Parameters
    ----------
    b, a : array_like
        Numerator / denominator coefficients of the prototype, highest
        degree first.
    wo : float, optional
        Desired passband center frequency (angular, e.g. rad/s).
    bw : float, optional
        Desired passband width (same units as `wo`).
    """
    a, b = map(atleast_1d, (a, b))
    D = len(a) - 1  # denominator degree
    N = len(b) - 1  # numerator degree
    artype = mintypecode((a, b))
    ma = max([N, D])
    Np = N + ma  # output degrees: each prototype degree contributes twice
    Dp = D + ma
    bprime = numpy.zeros(Np + 1, artype)
    aprime = numpy.zeros(Dp + 1, artype)
    wosq = wo * wo
    # Apply the lowpass-to-bandpass substitution s -> (s**2 + wo**2)/(bw*s):
    # each prototype term expands binomially, and the loops collect every
    # contribution to the coefficient of s**j (stored highest degree first).
    for j in range(Np + 1):
        val = 0.0
        for i in range(0, N + 1):
            for k in range(0, i + 1):
                if ma - i + 2 * k == j:
                    val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
        bprime[Np - j] = val
    for j in range(Dp + 1):
        val = 0.0
        for i in range(0, D + 1):
            for k in range(0, i + 1):
                if ma - i + 2 * k == j:
                    val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
        aprime[Dp - j] = val
    return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
    """
    Transform a lowpass filter prototype to a bandstop filter.

    Return an analog band-stop filter with center frequency `wo` and
    bandwidth `bw` from an analog low-pass filter prototype with unity
    cutoff frequency, in transfer function ('ba') representation.

    Parameters
    ----------
    b, a : array_like
        Numerator / denominator coefficients of the prototype, highest
        degree first.
    wo : float, optional
        Desired stopband center frequency (angular, e.g. rad/s).
    bw : float, optional
        Desired stopband width (same units as `wo`).
    """
    a, b = map(atleast_1d, (a, b))
    D = len(a) - 1  # denominator degree
    N = len(b) - 1  # numerator degree
    artype = mintypecode((a, b))
    M = max([N, D])
    Np = M + M  # both output polynomials have degree 2*M
    Dp = M + M
    bprime = numpy.zeros(Np + 1, artype)
    aprime = numpy.zeros(Dp + 1, artype)
    wosq = wo * wo
    # Apply the lowpass-to-bandstop substitution s -> (bw*s)/(s**2 + wo**2):
    # each prototype term expands binomially, and the loops collect every
    # contribution to the coefficient of s**j (stored highest degree first).
    for j in range(Np + 1):
        val = 0.0
        for i in range(0, N + 1):
            for k in range(0, M - i + 1):
                if i + 2 * k == j:
                    val += (comb(M - i, k) * b[N - i] *
                            (wosq) ** (M - i - k) * bw ** i)
        bprime[Np - j] = val
    for j in range(Dp + 1):
        val = 0.0
        for i in range(0, D + 1):
            for k in range(0, M - i + 1):
                if i + 2 * k == j:
                    val += (comb(M - i, k) * a[D - i] *
                            (wosq) ** (M - i - k) * bw ** i)
        aprime[Dp - j] = val
    return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
    """Return a digital filter from an analog one using a bilinear transform.
    The bilinear transform substitutes ``(z-1) / (z+1)`` for ``s``.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients of the analog filter.
    a : array_like
        Denominator polynomial coefficients of the analog filter.
    fs : float
        Sample rate, as ordinary frequency (e.g. hertz).  No prewarping
        is done in this function.

    Returns
    -------
    b, a : ndarray
        Numerator and denominator of the digital filter (normalized via
        `normalize`).
    """
    fs = float(fs)
    a, b = map(atleast_1d, (a, b))
    D = len(a) - 1  # denominator degree
    N = len(b) - 1  # numerator degree
    artype = float
    M = max([N, D])
    Np = M  # transformed numerator degree
    Dp = M  # transformed denominator degree
    bprime = numpy.zeros(Np + 1, artype)
    aprime = numpy.zeros(Dp + 1, artype)
    # Expand each coefficient's (2*fs)**i * (z-1)**i * (z+1)**(M-i)
    # factor binomially and collect the terms landing on power j.
    for j in range(Np + 1):
        val = 0.0
        for i in range(N + 1):
            for k in range(i + 1):
                for l in range(M - i + 1):
                    if k + l == j:
                        val += (comb(i, k) * comb(M - i, l) * b[N - i] *
                                pow(2 * fs, i) * (-1) ** k)
        bprime[j] = real(val)
    for j in range(Dp + 1):
        val = 0.0
        for i in range(D + 1):
            for k in range(i + 1):
                for l in range(M - i + 1):
                    if k + l == j:
                        val += (comb(i, k) * comb(M - i, l) * a[D - i] *
                                pow(2 * fs, i) * (-1) ** k)
        aprime[j] = real(val)
    return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
    """Complete IIR digital and analog filter design.

    Given passband and stopband frequencies and gains, construct an
    analog or digital IIR filter of minimum order for a given basic
    type, returning it in numerator/denominator ('ba'), pole-zero
    ('zpk'), or second-order sections ('sos') form.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.  For digital filters
        these are normalized from 0 to 1 (1 is the Nyquist frequency);
        for analog filters they are angular frequencies (e.g. rad/s).
        Examples:
        - Lowpass:  wp = 0.2, ws = 0.3
        - Highpass: wp = 0.3, ws = 0.2
        - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
        - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter; otherwise a digital one.
    ftype : str, optional
        Basic IIR filter type: 'butter', 'cheby1', 'cheby2', 'ellip'
        (default), or 'bessel'.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output representation.  Default is 'ba'.

    Returns
    -------
    The designed filter in the representation selected by `output`;
    see `iirfilter` for details.

    See Also
    --------
    iirfilter : General filter design using order and critical frequencies
    buttord, cheb1ord, cheb2ord, ellipord
    """
    # Look up the order-selection function for this family; a missing
    # second entry means the family has no order selection (e.g. bessel).
    try:
        ordfunc = filter_dict[ftype][1]
    except KeyError:
        raise ValueError("Invalid IIR filter type: %s" % ftype)
    except IndexError:
        raise ValueError(("%s does not have order selection. Use "
                          "iirfilter function.") % ftype)

    wp, ws = atleast_1d(wp), atleast_1d(ws)

    # Encode the band configuration as 1..4:
    # scalar edges -> low/high pass, pair edges -> stop/pass band;
    # wp[0] >= ws[0] distinguishes highpass/bandpass from their duals.
    band_type = 2 * (len(wp) - 1) + 1
    if wp[0] >= ws[0]:
        band_type += 1
    btype = {1: 'lowpass', 2: 'highpass',
             3: 'bandstop', 4: 'bandpass'}[band_type]

    # Minimum order and natural frequency meeting the spec, then design.
    N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
    return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
                     ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
              ftype='butter', output='ba'):
    """
    IIR digital and analog filter design given order and critical points.
    Design an Nth order digital or analog filter and return the filter
    coefficients.
    Parameters
    ----------
    N : int
        The order of the filter.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.
        For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
        Nyquist frequency, pi radians/sample. (`Wn` is thus in
        half-cycles / sample.)
        For analog filters, `Wn` is an angular frequency (e.g. rad/s).
    rp : float, optional
        For Chebyshev and elliptic filters, provides the maximum ripple
        in the passband. (dB)
    rs : float, optional
        For Chebyshev and elliptic filters, provides the minimum attenuation
        in the stop band. (dB)
    btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
        The type of filter.  Default is 'bandpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    ftype : str, optional
        The type of IIR filter to design:
        - Butterworth : 'butter'
        - Chebyshev I : 'cheby1'
        - Chebyshev II : 'cheby2'
        - Cauer/elliptic: 'ellip'
        - Bessel/Thomson: 'bessel'
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba'.
    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function. Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output=='sos'``.
    See Also
    --------
    butter : Filter design using order and critical points
    cheby1, cheby2, ellip, bessel
    buttord : Find order and critical points from passband and stopband spec
    cheb1ord, cheb2ord, ellipord
    iirdesign : General filter design using passband and stopband spec
    Notes
    -----
    The ``'sos'`` output parameter was added in 0.16.0.
    Examples
    --------
    Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
    response:
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
    ...                         analog=True, ftype='cheby2')
    >>> w, h = signal.freqs(b, a, 1000)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.semilogx(w, 20 * np.log10(abs(h)))
    >>> ax.set_title('Chebyshev Type II bandpass frequency response')
    >>> ax.set_xlabel('Frequency [radians / second]')
    >>> ax.set_ylabel('Amplitude [dB]')
    >>> ax.axis((10, 1000, -100, 10))
    >>> ax.grid(which='both', axis='both')
    >>> plt.show()
    """
    # Normalize the string arguments before dictionary lookups.
    ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
    Wn = asarray(Wn)
    try:
        btype = band_dict[btype]
    except KeyError:
        raise ValueError("'%s' is an invalid bandtype for filter." % btype)
    try:
        typefunc = filter_dict[ftype][0]
    except KeyError:
        raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
    if output not in ['ba', 'zpk', 'sos']:
        raise ValueError("'%s' is not a valid output form." % output)
    if rp is not None and rp < 0:
        raise ValueError("passband ripple (rp) must be positive")
    if rs is not None and rs < 0:
        raise ValueError("stopband attenuation (rs) must be positive")
    # Get analog lowpass prototype (zpk form); each family requires a
    # different subset of the rp/rs ripple parameters.
    if typefunc in [buttap, besselap]:
        z, p, k = typefunc(N)
    elif typefunc == cheb1ap:
        if rp is None:
            raise ValueError("passband ripple (rp) must be provided to "
                             "design a Chebyshev I filter.")
        z, p, k = typefunc(N, rp)
    elif typefunc == cheb2ap:
        if rs is None:
            raise ValueError("stopband attenuation (rs) must be provided to "
                             "design an Chebyshev II filter.")
        z, p, k = typefunc(N, rs)
    elif typefunc == ellipap:
        if rs is None or rp is None:
            raise ValueError("Both rp and rs must be provided to design an "
                             "elliptic filter.")
        z, p, k = typefunc(N, rp, rs)
    else:
        raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
    # Pre-warp frequencies for digital filter design, compensating for
    # the frequency compression of the bilinear transform applied below.
    if not analog:
        if numpy.any(Wn < 0) or numpy.any(Wn > 1):
            raise ValueError("Digital filter critical frequencies "
                             "must be 0 <= Wn <= 1")
        fs = 2.0  # nominal sample rate used for warping and bilinear
        warped = 2 * fs * tan(pi * Wn / fs)
    else:
        warped = Wn
    # transform to lowpass, bandpass, highpass, or bandstop
    if btype in ('lowpass', 'highpass'):
        if numpy.size(Wn) != 1:
            raise ValueError('Must specify a single critical frequency Wn')
        if btype == 'lowpass':
            z, p, k = _zpklp2lp(z, p, k, wo=warped)
        elif btype == 'highpass':
            z, p, k = _zpklp2hp(z, p, k, wo=warped)
    elif btype in ('bandpass', 'bandstop'):
        try:
            bw = warped[1] - warped[0]
            wo = sqrt(warped[0] * warped[1])
        except IndexError:
            raise ValueError('Wn must specify start and stop frequencies')
        if btype == 'bandpass':
            z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
        elif btype == 'bandstop':
            z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
    else:
        raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
    # Find discrete equivalent if necessary
    if not analog:
        z, p, k = _zpkbilinear(z, p, k, fs=fs)
    # Transform to proper out type (pole-zero, state-space, numer-denom)
    if output == 'zpk':
        return z, p, k
    elif output == 'ba':
        return zpk2tf(z, p, k)
    elif output == 'sos':
        return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
    """
    Bilinear (Tustin) transform of an analog filter in zpk form.

    Substitutes ``(z-1) / (z+1)`` for ``s``, mapping the analog s-plane
    zeros and poles onto the digital z-plane while maintaining the shape
    of the frequency response.

    Parameters
    ----------
    z, p : ndarray
        Zeros and poles of the analog transfer function.
    k : float
        System gain of the analog transfer function.
    fs : float
        Sample rate, as ordinary frequency (e.g. hertz).  No prewarping
        is done in this function.

    Returns
    -------
    z, p, k : zeros, poles, and gain of the digital transfer function.
    """
    z, p = atleast_1d(z), atleast_1d(p)
    degree = _relative_degree(z, p)

    fs2 = 2 * fs

    # Map each singularity s -> (fs2 + s) / (fs2 - s).
    z_mapped = (fs2 + z) / (fs2 - z)
    p_mapped = (fs2 + p) / (fs2 - p)

    # Zeros that were at infinity land at z = -1 (the Nyquist frequency).
    z_mapped = append(z_mapped, -ones(degree))

    # Scale the gain so the overall response is unchanged.
    k_mapped = k * real(prod(fs2 - z) / prod(fs2 - p))
    return z_mapped, p_mapped, k_mapped
def _zpklp2lp(z, p, k, wo=1.0):
    """
    Shift a lowpass prototype (zpk form) to cutoff frequency `wo`.

    Applies the s-plane substitution ``s -> s / wo`` to an analog
    low-pass filter prototype with unity cutoff frequency.

    Parameters
    ----------
    z, p : ndarray
        Zeros and poles of the prototype transfer function.
    k : float
        System gain of the prototype transfer function.
    wo : float
        Desired cutoff, as angular frequency (e.g. rad/s).
        Defaults to no change.

    Returns
    -------
    z, p, k : zeros, poles, and gain of the shifted low-pass filter.
    """
    z, p = atleast_1d(z), atleast_1d(p)
    wo = float(wo)  # avoid int wraparound in the power below
    degree = _relative_degree(z, p)

    # Radial scaling about the origin moves the cutoff frequency.
    z_new = wo * z
    p_new = wo * p

    # Each scaled pole lowers the gain by wo and each scaled zero raises
    # it; compensate so the overall gain is unchanged.
    k_new = k * wo**degree
    return z_new, p_new, k_new
def _zpklp2hp(z, p, k, wo=1.0):
    """
    Transform a lowpass prototype (zpk form) to a highpass filter.

    Applies the s-plane substitution ``s -> wo / s`` to an analog
    low-pass filter prototype with unity cutoff frequency, which keeps
    the lowpass and highpass responses symmetric on a logarithmic scale.

    Parameters
    ----------
    z, p : ndarray
        Zeros and poles of the prototype transfer function.
    k : float
        System gain of the prototype transfer function.
    wo : float
        Desired cutoff, as angular frequency (e.g. rad/s).
        Defaults to no change.

    Returns
    -------
    z, p, k : zeros, poles, and gain of the high-pass filter.
    """
    z, p = atleast_1d(z), atleast_1d(p)
    wo = float(wo)
    degree = _relative_degree(z, p)

    # Radial inversion about the unit circle turns the LPF into an HPF
    # with its cutoff at wo.
    z_new = wo / z
    p_new = wo / p

    # Zeros previously at infinity move to the origin under inversion.
    z_new = append(z_new, zeros(degree))

    # Undo the gain change introduced by the inversion.
    k_new = k * real(prod(-z) / prod(-p))
    return z_new, p_new, k_new
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
    """
    Transform a lowpass prototype (zpk form) to a bandpass filter.

    Applies the "wideband" s-plane substitution
    ``s -> (s**2 + wo**2) / (s * bw)`` to an analog low-pass prototype
    with unity cutoff frequency, producing a passband with geometric
    (log-frequency) symmetry about `wo`.

    Parameters
    ----------
    z, p : ndarray
        Zeros and poles of the prototype transfer function.
    k : float
        System gain of the prototype transfer function.
    wo : float
        Desired passband center, as angular frequency (e.g. rad/s).
        Defaults to no change.
    bw : float
        Desired passband width, as angular frequency (e.g. rad/s).
        Defaults to 1.

    Returns
    -------
    z, p, k : zeros, poles, and gain of the band-pass filter.
    """
    z, p = atleast_1d(z), atleast_1d(p)
    wo, bw = float(wo), float(bw)
    degree = _relative_degree(z, p)

    # Scale to the desired bandwidth; use complex dtype so the square
    # roots below yield complex values instead of NaN.
    z_scaled = (z * bw / 2).astype(complex)
    p_scaled = (p * bw / 2).astype(complex)

    # Duplicate each singularity, shifting from baseband to +wo and -wo.
    shift_z = sqrt(z_scaled**2 - wo**2)
    shift_p = sqrt(p_scaled**2 - wo**2)
    z_bp = concatenate((z_scaled + shift_z, z_scaled - shift_z))
    p_bp = concatenate((p_scaled + shift_p, p_scaled - shift_p))

    # Move `degree` zeros to the origin, leaving `degree` at infinity.
    z_bp = append(z_bp, zeros(degree))

    # Compensate for the gain change from frequency scaling.
    k_bp = k * bw**degree
    return z_bp, p_bp, k_bp
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
    """
    Transform a lowpass prototype (zpk form) to a bandstop filter.

    Applies the "wideband" s-plane substitution
    ``s -> (s * bw) / (s**2 + wo**2)`` to an analog low-pass prototype
    with unity cutoff frequency, producing a stopband with geometric
    (log-frequency) symmetry about `wo`.

    Parameters
    ----------
    z, p : ndarray
        Zeros and poles of the prototype transfer function.
    k : float
        System gain of the prototype transfer function.
    wo : float
        Desired stopband center, as angular frequency (e.g. rad/s).
        Defaults to no change.
    bw : float
        Desired stopband width, as angular frequency (e.g. rad/s).
        Defaults to 1.

    Returns
    -------
    z, p, k : zeros, poles, and gain of the band-stop filter.
    """
    z, p = atleast_1d(z), atleast_1d(p)
    wo, bw = float(wo), float(bw)
    degree = _relative_degree(z, p)

    # Invert to a highpass with the desired bandwidth; complex dtype so
    # the square roots below yield complex values instead of NaN.
    z_inv = ((bw / 2) / z).astype(complex)
    p_inv = ((bw / 2) / p).astype(complex)

    # Duplicate each singularity, shifting from baseband to +wo and -wo.
    shift_z = sqrt(z_inv**2 - wo**2)
    shift_p = sqrt(p_inv**2 - wo**2)
    z_bs = concatenate((z_inv + shift_z, z_inv - shift_z))
    p_bs = concatenate((p_inv + shift_p, p_inv - shift_p))

    # Zeros formerly at infinity land at the stopband center, +/- j*wo.
    z_bs = append(z_bs, +1j * wo * ones(degree))
    z_bs = append(z_bs, -1j * wo * ones(degree))

    # Undo the gain change introduced by the inversion.
    k_bs = k * real(prod(-z) / prod(-p))
    return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba'):
    """
    Butterworth digital and analog filter design.

    Design an Nth order digital or analog Butterworth filter (maximally
    flat frequency response in the passband) and return the filter
    coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies,
        the "-3 dB points" where the gain drops to 1/sqrt(2) of the
        passband.  For digital filters, normalized from 0 to 1 (1 is
        the Nyquist frequency); for analog filters, angular frequency
        (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter.  Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter; otherwise a digital one.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output representation.  Default is 'ba'.

    Returns
    -------
    The filter in the representation requested by `output`; see
    `iirfilter` for details.

    See Also
    --------
    buttord
    """
    # Delegate the actual design work to the generic iirfilter routine.
    design_kwargs = dict(btype=btype, analog=analog, output=output,
                         ftype='butter')
    return iirfilter(N, Wn, **design_kwargs)
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
    """
    Chebyshev type I digital and analog filter design.

    Design an Nth order digital or analog Chebyshev type I filter and
    return the filter coefficients.  Type I filters trade equiripple in
    the passband for a faster roll-off than type II (`cheby2`).

    Parameters
    ----------
    N : int
        The order of the filter.
    rp : float
        The maximum ripple allowed below unity gain in the passband,
        specified in decibels as a positive number.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies:
        the point in the transition band where the gain first drops
        below -`rp`.  For digital filters, normalized from 0 to 1 (1 is
        the Nyquist frequency); for analog filters, angular frequency
        (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter.  Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter; otherwise a digital one.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output representation.  Default is 'ba'.

    Returns
    -------
    The filter in the representation requested by `output`; see
    `iirfilter` for details.

    See Also
    --------
    cheb1ord
    """
    # All of the work happens in the generic iirfilter routine.
    return iirfilter(N, Wn, rp=rp, ftype='cheby1', btype=btype,
                     analog=analog, output=output)
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
    """
    Chebyshev type II digital and analog filter design.

    Design an Nth order digital or analog Chebyshev type II filter and
    return the filter coefficients.  Type II filters have equiripple in
    the stopband and a monotonic passband; they roll off more slowly
    than type I (`cheby1`).

    Parameters
    ----------
    N : int
        The order of the filter.
    rs : float
        The minimum attenuation required in the stop band, specified in
        decibels as a positive number.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies:
        the point in the transition band where the gain first reaches
        -`rs`.  For digital filters, normalized from 0 to 1 (1 is the
        Nyquist frequency); for analog filters, angular frequency
        (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter.  Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter; otherwise a digital one.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output representation.  Default is 'ba'.

    Returns
    -------
    The filter in the representation requested by `output`; see
    `iirfilter` for details.

    See Also
    --------
    cheb2ord
    """
    # All of the work happens in the generic iirfilter routine.
    return iirfilter(N, Wn, rs=rs, ftype='cheby2', btype=btype,
                     analog=analog, output=output)
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
    """
    Elliptic (Cauer) digital and analog filter design.

    Design an Nth order digital or analog elliptic filter and return
    the filter coefficients.  Elliptic (Zolotarev) filters maximize the
    transition-band steepness at the cost of equiripple in both the
    passband and the stopband.

    Parameters
    ----------
    N : int
        The order of the filter.
    rp : float
        The maximum ripple allowed below unity gain in the passband,
        specified in decibels as a positive number.
    rs : float
        The minimum attenuation required in the stop band, specified in
        decibels as a positive number.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies:
        the point in the transition band where the gain first drops
        below -`rp`.  For digital filters, normalized from 0 to 1 (1 is
        the Nyquist frequency); for analog filters, angular frequency
        (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter.  Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter; otherwise a digital one.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output representation.  Default is 'ba'.

    Returns
    -------
    The filter in the representation requested by `output`; see
    `iirfilter` for details.

    See Also
    --------
    ellipord
    """
    # All of the work happens in the generic iirfilter routine.
    return iirfilter(N, Wn, rp=rp, rs=rs, ftype='elliptic', btype=btype,
                     analog=analog, output=output)
def bessel(N, Wn, btype='low', analog=False, output='ba'):
    """Bessel/Thomson digital and analog filter design.

    Design an Nth order digital or analog Bessel filter and return the
    filter coefficients.  The analog Bessel (Thomson) filter has
    maximally flat group delay and maximally linear phase response,
    with very little ringing in the step response.

    Parameters
    ----------
    N : int
        The order of the filter.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies,
        defined as the point where the response asymptotes match a
        Butterworth filter of the same order.  For digital filters,
        normalized from 0 to 1 (1 is the Nyquist frequency); for analog
        filters, angular frequency (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter.  Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter; otherwise a digital one.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output representation.  Default is 'ba'.

    Returns
    -------
    The filter in the representation requested by `output`; see
    `iirfilter` for details.

    Notes
    -----
    The digital version is produced by the bilinear transform, which
    does not preserve the analog phase response; it is only
    approximately correct below about fs/4.
    """
    # Delegate the actual design work to the generic iirfilter routine.
    bessel_kwargs = dict(btype=btype, analog=analog, output=output,
                         ftype='bessel')
    return iirfilter(N, Wn, **bessel_kwargs)
def maxflat():
    """Placeholder -- not implemented; does nothing and returns None."""
    pass
def yulewalk():
    """Placeholder -- not implemented; does nothing and returns None."""
    pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
gstop : float
Amount of attenuation in stopband in dB.
gpass : float
Amount of ripple in the passband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=False):
    """Butterworth filter order selection.

    Return the order of the lowest order digital or analog Butterworth filter
    that loses no more than `gpass` dB in the passband and has at least
    `gstop` dB attenuation in the stopband.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.
        For digital filters, these are normalized from 0 to 1, where 1 is the
        Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
        half-cycles / sample.) For example:

            - Lowpass: wp = 0.2, ws = 0.3
            - Highpass: wp = 0.3, ws = 0.2
            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]

        For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.

    Returns
    -------
    ord : int
        The lowest order for a Butterworth filter which meets specs.
    wn : ndarray or float
        The Butterworth natural frequency (i.e. the "3dB frequency"). Should
        be used with `butter` to give filter results.

    See Also
    --------
    butter : Filter design using order and critical points
    cheb1ord : Find order and critical points from passband and stopband spec
    cheb2ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec

    Examples
    --------
    Design an analog bandpass filter with passband within 3 dB from 20 to
    50 rad/s, while rejecting at least -40 dB below 14 and above 60 rad/s.
    Plot its frequency response, showing the passband and stopband
    constraints in gray.

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
    >>> b, a = signal.butter(N, Wn, 'band', True)
    >>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
    >>> plt.title('Butterworth bandpass filter fit to constraints')
    >>> plt.xlabel('Frequency [radians / second]')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.grid(which='both', axis='both')
    >>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
    >>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
    >>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
    >>> plt.axis([10, 100, -60, 3])
    >>> plt.show()
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # Band type code: 1 lowpass, 2 highpass, 3 bandstop, 4 bandpass,
    # inferred from the number of edges and their relative ordering.
    filter_type = 2 * (len(wp) - 1)
    filter_type += 1
    if wp[0] >= ws[0]:
        filter_type += 1

    # Pre-warp frequencies for digital filter design
    if not analog:
        passb = tan(pi * wp / 2.0)
        stopb = tan(pi * ws / 2.0)
    else:
        passb = wp * 1.0
        stopb = ws * 1.0

    if filter_type == 1:            # low
        nat = stopb / passb
    elif filter_type == 2:          # high
        nat = passb / stopb
    elif filter_type == 3:          # stop
        # Move each passband edge toward the stopband to minimize the
        # order; band_stop_obj evaluates the candidate (non-integer) order.
        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
                                 args=(0, passb, stopb, gpass, gstop,
                                       'butter'),
                                 disp=0)
        passb[0] = wp0
        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
                                 args=(1, passb, stopb, gpass, gstop,
                                       'butter'),
                                 disp=0)
        passb[1] = wp1
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif filter_type == 4:          # pass
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))

    nat = min(abs(nat))

    GSTOP = 10 ** (0.1 * abs(gstop))
    GPASS = 10 ** (0.1 * abs(gpass))
    ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))

    # Find the Butterworth natural frequency WN (or the "3dB" frequency")
    # to give exactly gpass at passb.
    try:
        W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
    except ZeroDivisionError:
        W0 = 1.0
        # Order zero means the specs are degenerate; use the warnings
        # machinery instead of printing straight to stdout.
        import warnings
        warnings.warn("Order is zero...check input parameters.")

    # now convert this frequency back from lowpass prototype
    # to the original analog filter
    if filter_type == 1:            # low
        WN = W0 * passb
    elif filter_type == 2:          # high
        WN = passb / W0
    elif filter_type == 3:          # stop
        WN = numpy.zeros(2, float)
        discr = sqrt((passb[1] - passb[0]) ** 2 +
                     4 * W0 ** 2 * passb[0] * passb[1])
        WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
        WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
        WN = numpy.sort(abs(WN))
    elif filter_type == 4:          # pass
        W0 = numpy.array([-W0, W0], float)
        WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
              sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
                   passb[0] * passb[1]))
        WN = numpy.sort(abs(WN))
    else:
        raise ValueError("Bad type: %s" % filter_type)

    # Un-warp back to normalized digital frequency when needed.
    if not analog:
        wn = (2.0 / pi) * arctan(WN)
    else:
        wn = WN

    if len(wn) == 1:
        wn = wn[0]
    return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
    """Chebyshev type I filter order selection.

    Compute the minimum order of a digital or analog Chebyshev type I
    filter whose passband loss does not exceed `gpass` dB and whose
    stopband attenuation is at least `gstop` dB.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.  For digital filters these
        are normalized from 0 to 1, where 1 is the Nyquist frequency (pi
        radians/sample); for analog filters they are angular frequencies
        (e.g. rad/s).  Scalars give low/highpass designs and two-element
        sequences give bandpass/bandstop designs, e.g.:

            - Lowpass: wp = 0.2, ws = 0.3
            - Highpass: wp = 0.3, ws = 0.2
            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]

    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, design an analog filter; otherwise a digital one.

    Returns
    -------
    ord : int
        The lowest order for a Chebyshev type I filter that meets specs.
    wn : ndarray or float
        The Chebyshev natural frequency (the "3dB frequency") for use with
        `cheby1` to give filter results.

    See Also
    --------
    cheby1 : Filter design using order and critical points
    buttord : Find order and critical points from passband and stopband spec
    cheb2ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # Band type code: 1 lowpass, 2 highpass, 3 bandstop, 4 bandpass.
    filter_type = 2 * (len(wp) - 1)
    filter_type += 1 if wp[0] < ws[0] else 2

    # Map digital specs onto the analog axis (bilinear pre-warp); analog
    # specs are used as-is (copied so the caller's arrays are not mutated).
    if analog:
        pass_edges = wp * 1.0
        stop_edges = ws * 1.0
    else:
        pass_edges = tan(pi * wp / 2.0)
        stop_edges = tan(pi * ws / 2.0)

    if filter_type == 1:        # lowpass
        selectivity = stop_edges / pass_edges
    elif filter_type == 2:      # highpass
        selectivity = pass_edges / stop_edges
    elif filter_type == 3:      # bandstop
        # Move each passband edge toward the stopband to minimize the
        # resulting order; band_stop_obj scores each candidate position.
        new_lo = optimize.fminbound(band_stop_obj,
                                    pass_edges[0], stop_edges[0] - 1e-12,
                                    args=(0, pass_edges, stop_edges,
                                          gpass, gstop, 'cheby'),
                                    disp=0)
        pass_edges[0] = new_lo
        new_hi = optimize.fminbound(band_stop_obj,
                                    stop_edges[1] + 1e-12, pass_edges[1],
                                    args=(1, pass_edges, stop_edges,
                                          gpass, gstop, 'cheby'),
                                    disp=0)
        pass_edges[1] = new_hi
        selectivity = ((stop_edges * (pass_edges[0] - pass_edges[1])) /
                       (stop_edges ** 2 - pass_edges[0] * pass_edges[1]))
    elif filter_type == 4:      # bandpass
        selectivity = ((stop_edges ** 2 - pass_edges[0] * pass_edges[1]) /
                       (stop_edges * (pass_edges[0] - pass_edges[1])))

    selectivity = min(abs(selectivity))

    # Convert the dB specs to linear power gains and apply the standard
    # Chebyshev order formula.
    gstop_lin = 10 ** (0.1 * abs(gstop))
    gpass_lin = 10 ** (0.1 * abs(gpass))
    order = int(ceil(arccosh(sqrt((gstop_lin - 1.0) / (gpass_lin - 1.0))) /
                     arccosh(selectivity)))

    # The natural frequencies are simply the passband edges (un-warped for
    # digital designs); a single edge is returned as a scalar.
    wn = pass_edges if analog else (2.0 / pi) * arctan(pass_edges)
    if len(wn) == 1:
        wn = wn[0]
    return order, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
    """Chebyshev type II filter order selection.

    Return the order of the lowest order digital or analog Chebyshev Type II
    filter that loses no more than `gpass` dB in the passband and has at least
    `gstop` dB attenuation in the stopband.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.
        For digital filters, these are normalized from 0 to 1, where 1 is the
        Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
        half-cycles / sample.) For example:

            - Lowpass: wp = 0.2, ws = 0.3
            - Highpass: wp = 0.3, ws = 0.2
            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]

        For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.

    Returns
    -------
    ord : int
        The lowest order for a Chebyshev type II filter that meets specs.
    wn : ndarray or float
        The Chebyshev natural frequency (the "3dB frequency") for use with
        `cheby2` to give filter results.

    See Also
    --------
    cheby2 : Filter design using order and critical points
    buttord : Find order and critical points from passband and stopband spec
    cheb1ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec

    Examples
    --------
    Design a digital bandstop filter which rejects -60 dB from 0.2*(fs/2) to
    0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
    0.6*(fs/2). Plot its frequency response, showing the passband and
    stopband constraints in gray.

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
    >>> b, a = signal.cheby2(N, 60, Wn, 'stop')
    >>> w, h = signal.freqz(b, a)
    >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
    >>> plt.title('Chebyshev II bandstop filter fit to constraints')
    >>> plt.xlabel('Normalized frequency')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop
    >>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # pass
    >>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # stop
    >>> plt.axis([0.06, 1, -80, 3])
    >>> plt.show()
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # Band type code: 1 lowpass, 2 highpass, 3 bandstop, 4 bandpass.
    filter_type = 2 * (len(wp) - 1)
    if wp[0] < ws[0]:
        filter_type += 1
    else:
        filter_type += 2
    # Pre-warp frequencies for digital filter design
    if not analog:
        passb = tan(pi * wp / 2.0)
        stopb = tan(pi * ws / 2.0)
    else:
        passb = wp * 1.0
        stopb = ws * 1.0
    # Selectivity of the equivalent lowpass prototype for each band type.
    if filter_type == 1:           # low
        nat = stopb / passb
    elif filter_type == 2:          # high
        nat = passb / stopb
    elif filter_type == 3:     # stop
        # Move each passband edge toward the stopband to minimize the
        # resulting order; band_stop_obj scores each candidate position.
        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
                                 args=(0, passb, stopb, gpass, gstop, 'cheby'),
                                 disp=0)
        passb[0] = wp0
        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
                                 args=(1, passb, stopb, gpass, gstop, 'cheby'),
                                 disp=0)
        passb[1] = wp1
        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif filter_type == 4:  # pass
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))
    nat = min(abs(nat))
    # Linear power gains from the dB specs; standard Chebyshev order formula.
    GSTOP = 10 ** (0.1 * abs(gstop))
    GPASS = 10 ** (0.1 * abs(gpass))
    ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
                   arccosh(nat)))
    # Find frequency where analog response is -gpass dB.
    # Then convert back from low-pass prototype to the original filter.
    new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
    new_freq = 1.0 / new_freq
    if filter_type == 1:
        nat = passb / new_freq
    elif filter_type == 2:
        nat = passb * new_freq
    elif filter_type == 3:
        nat = numpy.zeros(2, float)
        nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
                  sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
                       passb[1] * passb[0]))
        # The second edge is the geometric-mean mirror of the first.
        nat[1] = passb[1] * passb[0] / nat[0]
    elif filter_type == 4:
        nat = numpy.zeros(2, float)
        nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
                  sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
                       passb[1] * passb[0]))
        nat[1] = passb[0] * passb[1] / nat[0]
    # Un-warp back to normalized digital frequency when needed; a single
    # edge is returned as a scalar.
    if not analog:
        wn = (2.0 / pi) * arctan(nat)
    else:
        wn = nat
    if len(wn) == 1:
        wn = wn[0]
    return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
    """Elliptic (Cauer) filter order selection.

    Compute the minimum order of a digital or analog elliptic filter whose
    passband loss does not exceed `gpass` dB and whose stopband attenuation
    is at least `gstop` dB.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.  For digital filters these
        are normalized from 0 to 1, where 1 is the Nyquist frequency (pi
        radians/sample); for analog filters they are angular frequencies
        (e.g. rad/s).  Scalars give low/highpass designs and two-element
        sequences give bandpass/bandstop designs, e.g.:

            - Lowpass: wp = 0.2, ws = 0.3
            - Highpass: wp = 0.3, ws = 0.2
            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]

    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, design an analog filter; otherwise a digital one.

    Returns
    -------
    ord : int
        The lowest order for an Elliptic (Cauer) filter that meets specs.
    wn : ndarray or float
        The Chebyshev natural frequency (the "3dB frequency") for use with
        `ellip` to give filter results.

    See Also
    --------
    ellip : Filter design using order and critical points
    buttord : Find order and critical points from passband and stopband spec
    cheb1ord, cheb2ord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # Band type code: 1 lowpass, 2 highpass, 3 bandstop, 4 bandpass.
    filter_type = 2 * (len(wp) - 1) + 1
    if wp[0] >= ws[0]:
        filter_type += 1

    # Map digital specs onto the analog axis (bilinear pre-warp); analog
    # specs are used as-is (copied so the caller's arrays are not mutated).
    if analog:
        pass_edges = wp * 1.0
        stop_edges = ws * 1.0
    else:
        pass_edges = tan(pi * wp / 2.0)
        stop_edges = tan(pi * ws / 2.0)

    if filter_type == 1:        # lowpass
        selectivity = stop_edges / pass_edges
    elif filter_type == 2:      # highpass
        selectivity = pass_edges / stop_edges
    elif filter_type == 3:      # bandstop
        # Move each passband edge toward the stopband to minimize the
        # resulting order; band_stop_obj scores each candidate position.
        new_lo = optimize.fminbound(band_stop_obj,
                                    pass_edges[0], stop_edges[0] - 1e-12,
                                    args=(0, pass_edges, stop_edges,
                                          gpass, gstop, 'ellip'),
                                    disp=0)
        pass_edges[0] = new_lo
        new_hi = optimize.fminbound(band_stop_obj,
                                    stop_edges[1] + 1e-12, pass_edges[1],
                                    args=(1, pass_edges, stop_edges,
                                          gpass, gstop, 'ellip'),
                                    disp=0)
        pass_edges[1] = new_hi
        selectivity = ((stop_edges * (pass_edges[0] - pass_edges[1])) /
                       (stop_edges ** 2 - pass_edges[0] * pass_edges[1]))
    elif filter_type == 4:      # bandpass
        selectivity = ((stop_edges ** 2 - pass_edges[0] * pass_edges[1]) /
                       (stop_edges * (pass_edges[0] - pass_edges[1])))

    selectivity = min(abs(selectivity))

    # Elliptic order formula: a ratio of complete elliptic integrals of the
    # selectivity and discrimination moduli.
    gstop_lin = 10 ** (0.1 * gstop)
    gpass_lin = 10 ** (0.1 * gpass)
    arg1 = sqrt((gpass_lin - 1.0) / (gstop_lin - 1.0))
    arg0 = 1.0 / selectivity
    d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
    d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
    order = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))

    # The natural frequencies are simply the passband edges (un-warped for
    # digital designs); a single edge is returned as a scalar.
    wn = pass_edges if analog else arctan(pass_edges) * 2.0 / pi
    if len(wn) == 1:
        wn = wn[0]
    return order, wn
def buttap(N):
    """Return zeros, poles, and gain of an Nth order analog Butterworth
    lowpass prototype.

    The prototype has no zeros, unit gain, and an angular (e.g. rad/s)
    cutoff frequency of 1.
    """
    if N != abs(int(N)):
        raise ValueError("Filter order must be a nonnegative integer")
    # Pole angles are spread evenly across the left half of the unit
    # circle; the index 0 entry (present for odd N) is an exactly real pole.
    indices = numpy.arange(-N + 1, N, 2)
    poles = -numpy.exp(1j * pi * indices / (2 * N))
    return numpy.array([]), poles, 1
def cheb1ap(N, rp):
    """
    Return zeros, poles, and gain of an Nth order Chebyshev type I analog
    lowpass prototype.

    The prototype has `rp` decibels of ripple in the passband, with its
    angular (e.g. rad/s) cutoff frequency — the point at which the gain
    first drops below ``-rp`` — normalized to 1.
    """
    if N != abs(int(N)):
        raise ValueError("Filter order must be a nonnegative integer")
    if N == 0:
        # Degenerate order: no poles or zeros.  Even order filters have DC
        # gain of -rp dB, and order 0 follows the even-order convention.
        return numpy.array([]), numpy.array([]), 10**(-rp/20)
    # Ripple factor epsilon derived from the passband ripple spec.
    ripple = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
    mu = 1.0 / N * arcsinh(1 / ripple)
    # Poles are arranged on an ellipse in the left half of the S-plane;
    # odd orders include the angle-0 (exactly real) pole.
    angles = pi * numpy.arange(-N+1, N, 2) / (2*N)
    poles = -sinh(mu + 1j*angles)
    gain = numpy.prod(-poles, axis=0).real
    if N % 2 == 0:
        # Even orders sit -rp dB down at DC; rescale the gain accordingly.
        gain = gain / sqrt((1 + ripple * ripple))
    return numpy.array([]), poles, gain
def cheb2ap(N, rs):
    """
    Return (z,p,k) for Nth order Chebyshev type II analog lowpass filter.

    The returned filter prototype has `rs` decibels of ripple in the stopband.

    The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
    defined as the point at which the gain first reaches ``-rs``.
    """
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    elif N == 0:
        # Avoid divide-by-zero warning
        return numpy.array([]), numpy.array([]), 1
    # Ripple factor (epsilon)
    de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
    mu = arcsinh(1.0 / de) / N
    if N % 2:
        # Odd order: skip m == 0, which would divide by zero in the zero
        # locations (that zero sits at infinity).
        m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
                               numpy.arange(2, N, 2)))
    else:
        m = numpy.arange(-N+1, N, 2)
    # Zeros are purely imaginary.
    z = -conjugate(1j / sin(m * pi / (2.0 * N)))
    # Poles around the unit circle like Butterworth
    p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
    # Warp into Chebyshev II
    p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
    p = 1.0 / p
    # Gain chosen so the zpk transfer function equals 1 at s = 0.
    k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
    return z, p, k
# Tolerance used by the elliptic-prototype helpers below (_vratio, _kratio,
# ellipap) when deciding whether a modulus or pole component is numerically
# indistinguishable from 0 or 1.
EPSILON = 2e-16
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
    # Scalar objective minimized in ellipap: |K(m)/K(1-m) - k_ratio| with m
    # clamped into [0, 1] and the degenerate endpoints handled explicitly.
    m = min(max(float(m), 0.0), 1.0)
    if abs(m) <= EPSILON:
        # m effectively 0: return a huge penalty.
        residual = 1e20
    elif (abs(m) + EPSILON) < 1:
        ks = special.ellipk([m, 1 - m])
        residual = ks[0] / ks[1] - k_ratio
    else:
        # m effectively 1.
        residual = -k_ratio
    return abs(residual)
def ellipap(N, rp, rs):
    """Return (z,p,k) of Nth order elliptic analog lowpass filter.

    The filter is a normalized prototype that has `rp` decibels of ripple
    in the passband and a stopband `rs` decibels down.

    The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
    defined as the point at which the gain first drops below ``-rp``.

    References
    ----------
    Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5
    and 12.
    """
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    elif N == 0:
        # Avoid divide-by-zero warning
        # Even order filters have DC gain of -rp dB
        return numpy.array([]), numpy.array([]), 10**(-rp/20)
    elif N == 1:
        # First order: a single real pole, no zeros, gain k = -p.
        p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
        k = -p
        z = []
        return asarray(z), asarray(p), k
    # Ripple factors: eps for the passband, ck1 for the pass/stop ratio.
    eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
    ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
    ck1p = numpy.sqrt(1 - ck1 * ck1)
    if ck1p == 1:
        raise ValueError("Cannot design a filter with given rp and rs"
                         " specifications.")
    val = special.ellipk([ck1 * ck1, ck1p * ck1p])
    if abs(1 - ck1p * ck1p) < EPSILON:
        krat = 0
    else:
        krat = N * val[0] / val[1]
    # Solve for the elliptic parameter m whose K-integral ratio matches krat.
    m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
                      disp=0)
    if m < 0 or m > 1:
        # Fall back to a bounded search if the simplex left [0, 1].
        m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
                               maxiter=250, disp=0)
    capk = special.ellipk(m)
    j = numpy.arange(1 - N % 2, N, 2)
    jj = len(j)
    [s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
    # Zeros are purely imaginary; drop the numerically-zero sn entries first.
    snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
    z = 1.0 / (sqrt(m) * snew)
    z = 1j * z
    z = numpy.concatenate((z, conjugate(z)))
    r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
                      maxfun=250, maxiter=250, disp=0)
    v0 = capk * r / (N * val[0])
    [sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
    p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
    if N % 2:
        # Odd order: keep the (nearly) real pole once, and conjugate only
        # the poles with a significant imaginary part.
        newp = numpy.compress(abs(p.imag) > EPSILON *
                              numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
                                                   axis=0).real),
                              p, axis=-1)
        p = numpy.concatenate((p, conjugate(newp)))
    else:
        p = numpy.concatenate((p, conjugate(p)))
    # Gain normalization; even orders are additionally scaled down so the
    # DC response reflects the -rp dB passband ripple.
    k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
    if N % 2 == 0:
        k = k / numpy.sqrt((1 + eps * eps))
    return z, p, k
def besselap(N):
"""Return (z,p,k) for analog prototype of an Nth order Bessel filter.
The filter is normalized such that the filter asymptotes are the same as
a Butterworth filter of the same order with an angular (e.g. rad/s)
cutoff frequency of 1.
Parameters
----------
N : int
The order of the Bessel filter to return zeros, poles and gain for.
Values in the range 0-25 are supported.
Returns
-------
z : ndarray
Zeros. Is always an empty array.
p : ndarray
Poles.
k : scalar
Gain. Always 1.
"""
z = []
k = 1
if N == 0:
p = []
elif N == 1:
p = [-1]
elif N == 2:
p = [-.8660254037844386467637229 + .4999999999999999999999996j,
-.8660254037844386467637229 - .4999999999999999999999996j]
elif N == 3:
p = [-.9416000265332067855971980,
-.7456403858480766441810907 - .7113666249728352680992154j,
-.7456403858480766441810907 + .7113666249728352680992154j]
elif N == 4:
p = [-.6572111716718829545787781 - .8301614350048733772399715j,
-.6572111716718829545787788 + .8301614350048733772399715j,
-.9047587967882449459642637 - .2709187330038746636700923j,
-.9047587967882449459642624 + .2709187330038746636700926j]
elif N == 5:
p = [-.9264420773877602247196260,
-.8515536193688395541722677 - .4427174639443327209850002j,
-.8515536193688395541722677 + .4427174639443327209850002j,
-.5905759446119191779319432 - .9072067564574549539291747j,
-.5905759446119191779319432 + .9072067564574549539291747j]
elif N == 6:
p = [-.9093906830472271808050953 - .1856964396793046769246397j,
-.9093906830472271808050953 + .1856964396793046769246397j,
-.7996541858328288520243325 - .5621717346937317988594118j,
-.7996541858328288520243325 + .5621717346937317988594118j,
-.5385526816693109683073792 - .9616876881954277199245657j,
-.5385526816693109683073792 + .9616876881954277199245657j]
elif N == 7:
p = [-.9194871556490290014311619,
-.8800029341523374639772340 - .3216652762307739398381830j,
-.8800029341523374639772340 + .3216652762307739398381830j,
-.7527355434093214462291616 - .6504696305522550699212995j,
-.7527355434093214462291616 + .6504696305522550699212995j,
-.4966917256672316755024763 - 1.002508508454420401230220j,
-.4966917256672316755024763 + 1.002508508454420401230220j]
elif N == 8:
p = [-.9096831546652910216327629 - .1412437976671422927888150j,
-.9096831546652910216327629 + .1412437976671422927888150j,
-.8473250802359334320103023 - .4259017538272934994996429j,
-.8473250802359334320103023 + .4259017538272934994996429j,
-.7111381808485399250796172 - .7186517314108401705762571j,
-.7111381808485399250796172 + .7186517314108401705762571j,
-.4621740412532122027072175 - 1.034388681126901058116589j,
-.4621740412532122027072175 + 1.034388681126901058116589j]
elif N == 9:
p = [-.9154957797499037686769223,
-.8911217017079759323183848 - .2526580934582164192308115j,
-.8911217017079759323183848 + .2526580934582164192308115j,
-.8148021112269012975514135 - .5085815689631499483745341j,
-.8148021112269012975514135 + .5085815689631499483745341j,
-.6743622686854761980403401 - .7730546212691183706919682j,
-.6743622686854761980403401 + .7730546212691183706919682j,
-.4331415561553618854685942 - 1.060073670135929666774323j,
-.4331415561553618854685942 + 1.060073670135929666774323j]
elif N == 10:
p = [-.9091347320900502436826431 - .1139583137335511169927714j,
-.9091347320900502436826431 + .1139583137335511169927714j,
-.8688459641284764527921864 - .3430008233766309973110589j,
-.8688459641284764527921864 + .3430008233766309973110589j,
-.7837694413101441082655890 - .5759147538499947070009852j,
-.7837694413101441082655890 + .5759147538499947070009852j,
-.6417513866988316136190854 - .8175836167191017226233947j,
-.6417513866988316136190854 + .8175836167191017226233947j,
-.4083220732868861566219785 - 1.081274842819124562037210j,
-.4083220732868861566219785 + 1.081274842819124562037210j]
elif N == 11:
p = [-.9129067244518981934637318,
-.8963656705721166099815744 - .2080480375071031919692341j,
-.8963656705721166099815744 + .2080480375071031919692341j,
-.8453044014712962954184557 - .4178696917801248292797448j,
-.8453044014712962954184557 + .4178696917801248292797448j,
-.7546938934722303128102142 - .6319150050721846494520941j,
-.7546938934722303128102142 + .6319150050721846494520941j,
-.6126871554915194054182909 - .8547813893314764631518509j,
-.6126871554915194054182909 + .8547813893314764631518509j,
-.3868149510055090879155425 - 1.099117466763120928733632j,
-.3868149510055090879155425 + 1.099117466763120928733632j]
elif N == 12:
p = [-.9084478234140682638817772 - 95506365213450398415258360.0e-27j,
-.9084478234140682638817772 + 95506365213450398415258360.0e-27j,
-.8802534342016826507901575 - .2871779503524226723615457j,
-.8802534342016826507901575 + .2871779503524226723615457j,
-.8217296939939077285792834 - .4810212115100676440620548j,
-.8217296939939077285792834 + .4810212115100676440620548j,
-.7276681615395159454547013 - .6792961178764694160048987j,
-.7276681615395159454547013 + .6792961178764694160048987j,
-.5866369321861477207528215 - .8863772751320727026622149j,
-.5866369321861477207528215 + .8863772751320727026622149j,
-.3679640085526312839425808 - 1.114373575641546257595657j,
-.3679640085526312839425808 + 1.114373575641546257595657j]
elif N == 13:
p = [-.9110914665984182781070663,
-.8991314665475196220910718 - .1768342956161043620980863j,
-.8991314665475196220910718 + .1768342956161043620980863j,
-.8625094198260548711573628 - .3547413731172988997754038j,
-.8625094198260548711573628 + .3547413731172988997754038j,
-.7987460692470972510394686 - .5350752120696801938272504j,
-.7987460692470972510394686 + .5350752120696801938272504j,
-.7026234675721275653944062 - .7199611890171304131266374j,
-.7026234675721275653944062 + .7199611890171304131266374j,
-.5631559842430199266325818 - .9135900338325109684927731j,
-.5631559842430199266325818 + .9135900338325109684927731j,
-.3512792323389821669401925 - 1.127591548317705678613239j,
-.3512792323389821669401925 + 1.127591548317705678613239j]
elif N == 14:
p = [-.9077932138396487614720659 - 82196399419401501888968130.0e-27j,
-.9077932138396487614720659 + 82196399419401501888968130.0e-27j,
-.8869506674916445312089167 - .2470079178765333183201435j,
-.8869506674916445312089167 + .2470079178765333183201435j,
-.8441199160909851197897667 - .4131653825102692595237260j,
-.8441199160909851197897667 + .4131653825102692595237260j,
-.7766591387063623897344648 - .5819170677377608590492434j,
-.7766591387063623897344648 + .5819170677377608590492434j,
-.6794256425119233117869491 - .7552857305042033418417492j,
-.6794256425119233117869491 + .7552857305042033418417492j,
-.5418766775112297376541293 - .9373043683516919569183099j,
-.5418766775112297376541293 + .9373043683516919569183099j,
-.3363868224902037330610040 - 1.139172297839859991370924j,
-.3363868224902037330610040 + 1.139172297839859991370924j]
elif N == 15:
p = [-.9097482363849064167228581,
-.9006981694176978324932918 - .1537681197278439351298882j,
-.9006981694176978324932918 + .1537681197278439351298882j,
-.8731264620834984978337843 - .3082352470564267657715883j,
-.8731264620834984978337843 + .3082352470564267657715883j,
-.8256631452587146506294553 - .4642348752734325631275134j,
-.8256631452587146506294553 + .4642348752734325631275134j,
-.7556027168970728127850416 - .6229396358758267198938604j,
-.7556027168970728127850416 + .6229396358758267198938604j,
-.6579196593110998676999362 - .7862895503722515897065645j,
-.6579196593110998676999362 + .7862895503722515897065645j,
-.5224954069658330616875186 - .9581787261092526478889345j,
-.5224954069658330616875186 + .9581787261092526478889345j,
-.3229963059766444287113517 - 1.149416154583629539665297j,
-.3229963059766444287113517 + 1.149416154583629539665297j]
elif N == 16:
p = [-.9072099595087001356491337 - 72142113041117326028823950.0e-27j,
-.9072099595087001356491337 + 72142113041117326028823950.0e-27j,
-.8911723070323647674780132 - .2167089659900576449410059j,
-.8911723070323647674780132 + .2167089659900576449410059j,
-.8584264231521330481755780 - .3621697271802065647661080j,
-.8584264231521330481755780 + .3621697271802065647661080j,
-.8074790293236003885306146 - .5092933751171800179676218j,
-.8074790293236003885306146 + .5092933751171800179676218j,
-.7356166304713115980927279 - .6591950877860393745845254j,
-.7356166304713115980927279 + .6591950877860393745845254j,
-.6379502514039066715773828 - .8137453537108761895522580j,
-.6379502514039066715773828 + .8137453537108761895522580j,
-.5047606444424766743309967 - .9767137477799090692947061j,
-.5047606444424766743309967 + .9767137477799090692947061j,
-.3108782755645387813283867 - 1.158552841199330479412225j,
-.3108782755645387813283867 + 1.158552841199330479412225j]
elif N == 17:
p = [-.9087141161336397432860029,
-.9016273850787285964692844 - .1360267995173024591237303j,
-.9016273850787285964692844 + .1360267995173024591237303j,
-.8801100704438627158492165 - .2725347156478803885651973j,
-.8801100704438627158492165 + .2725347156478803885651973j,
-.8433414495836129204455491 - .4100759282910021624185986j,
-.8433414495836129204455491 + .4100759282910021624185986j,
-.7897644147799708220288138 - .5493724405281088674296232j,
-.7897644147799708220288138 + .5493724405281088674296232j,
-.7166893842372349049842743 - .6914936286393609433305754j,
-.7166893842372349049842743 + .6914936286393609433305754j,
-.6193710717342144521602448 - .8382497252826992979368621j,
-.6193710717342144521602448 + .8382497252826992979368621j,
-.4884629337672704194973683 - .9932971956316781632345466j,
-.4884629337672704194973683 + .9932971956316781632345466j,
-.2998489459990082015466971 - 1.166761272925668786676672j,
-.2998489459990082015466971 + 1.166761272925668786676672j]
elif N == 18:
p = [-.9067004324162775554189031 - 64279241063930693839360680.0e-27j,
-.9067004324162775554189031 + 64279241063930693839360680.0e-27j,
-.8939764278132455733032155 - .1930374640894758606940586j,
-.8939764278132455733032155 + .1930374640894758606940586j,
-.8681095503628830078317207 - .3224204925163257604931634j,
-.8681095503628830078317207 + .3224204925163257604931634j,
-.8281885016242836608829018 - .4529385697815916950149364j,
-.8281885016242836608829018 + .4529385697815916950149364j,
-.7726285030739558780127746 - .5852778162086640620016316j,
-.7726285030739558780127746 + .5852778162086640620016316j,
-.6987821445005273020051878 - .7204696509726630531663123j,
-.6987821445005273020051878 + .7204696509726630531663123j,
-.6020482668090644386627299 - .8602708961893664447167418j,
-.6020482668090644386627299 + .8602708961893664447167418j,
-.4734268069916151511140032 - 1.008234300314801077034158j,
-.4734268069916151511140032 + 1.008234300314801077034158j,
-.2897592029880489845789953 - 1.174183010600059128532230j,
-.2897592029880489845789953 + 1.174183010600059128532230j]
elif N == 19:
p = [-.9078934217899404528985092,
-.9021937639390660668922536 - .1219568381872026517578164j,
-.9021937639390660668922536 + .1219568381872026517578164j,
-.8849290585034385274001112 - .2442590757549818229026280j,
-.8849290585034385274001112 + .2442590757549818229026280j,
-.8555768765618421591093993 - .3672925896399872304734923j,
-.8555768765618421591093993 + .3672925896399872304734923j,
-.8131725551578197705476160 - .4915365035562459055630005j,
-.8131725551578197705476160 + .4915365035562459055630005j,
-.7561260971541629355231897 - .6176483917970178919174173j,
-.7561260971541629355231897 + .6176483917970178919174173j,
-.6818424412912442033411634 - .7466272357947761283262338j,
-.6818424412912442033411634 + .7466272357947761283262338j,
-.5858613321217832644813602 - .8801817131014566284786759j,
-.5858613321217832644813602 + .8801817131014566284786759j,
-.4595043449730988600785456 - 1.021768776912671221830298j,
-.4595043449730988600785456 + 1.021768776912671221830298j,
-.2804866851439370027628724 - 1.180931628453291873626003j,
-.2804866851439370027628724 + 1.180931628453291873626003j]
elif N == 20:
p = [-.9062570115576771146523497 - 57961780277849516990208850.0e-27j,
-.9062570115576771146523497 + 57961780277849516990208850.0e-27j,
-.8959150941925768608568248 - .1740317175918705058595844j,
-.8959150941925768608568248 + .1740317175918705058595844j,
-.8749560316673332850673214 - .2905559296567908031706902j,
-.8749560316673332850673214 + .2905559296567908031706902j,
-.8427907479956670633544106 - .4078917326291934082132821j,
-.8427907479956670633544106 + .4078917326291934082132821j,
-.7984251191290606875799876 - .5264942388817132427317659j,
-.7984251191290606875799876 + .5264942388817132427317659j,
-.7402780309646768991232610 - .6469975237605228320268752j,
-.7402780309646768991232610 + .6469975237605228320268752j,
-.6658120544829934193890626 - .7703721701100763015154510j,
-.6658120544829934193890626 + .7703721701100763015154510j,
-.5707026806915714094398061 - .8982829066468255593407161j,
-.5707026806915714094398061 + .8982829066468255593407161j,
-.4465700698205149555701841 - 1.034097702560842962315411j,
-.4465700698205149555701841 + 1.034097702560842962315411j,
-.2719299580251652601727704 - 1.187099379810885886139638j,
-.2719299580251652601727704 + 1.187099379810885886139638j]
elif N == 21:
p = [-.9072262653142957028884077,
-.9025428073192696303995083 - .1105252572789856480992275j,
-.9025428073192696303995083 + .1105252572789856480992275j,
-.8883808106664449854431605 - .2213069215084350419975358j,
-.8883808106664449854431605 + .2213069215084350419975358j,
-.8643915813643204553970169 - .3326258512522187083009453j,
-.8643915813643204553970169 + .3326258512522187083009453j,
-.8299435470674444100273463 - .4448177739407956609694059j,
-.8299435470674444100273463 + .4448177739407956609694059j,
-.7840287980408341576100581 - .5583186348022854707564856j,
-.7840287980408341576100581 + .5583186348022854707564856j,
-.7250839687106612822281339 - .6737426063024382240549898j,
-.7250839687106612822281339 + .6737426063024382240549898j,
-.6506315378609463397807996 - .7920349342629491368548074j,
-.6506315378609463397807996 + .7920349342629491368548074j,
-.5564766488918562465935297 - .9148198405846724121600860j,
-.5564766488918562465935297 + .9148198405846724121600860j,
-.4345168906815271799687308 - 1.045382255856986531461592j,
-.4345168906815271799687308 + 1.045382255856986531461592j,
-.2640041595834031147954813 - 1.192762031948052470183960j,
-.2640041595834031147954813 + 1.192762031948052470183960j]
elif N == 22:
p = [-.9058702269930872551848625 - 52774908289999045189007100.0e-27j,
-.9058702269930872551848625 + 52774908289999045189007100.0e-27j,
-.8972983138153530955952835 - .1584351912289865608659759j,
-.8972983138153530955952835 + .1584351912289865608659759j,
-.8799661455640176154025352 - .2644363039201535049656450j,
-.8799661455640176154025352 + .2644363039201535049656450j,
-.8534754036851687233084587 - .3710389319482319823405321j,
-.8534754036851687233084587 + .3710389319482319823405321j,
-.8171682088462720394344996 - .4785619492202780899653575j,
-.8171682088462720394344996 + .4785619492202780899653575j,
-.7700332930556816872932937 - .5874255426351153211965601j,
-.7700332930556816872932937 + .5874255426351153211965601j,
-.7105305456418785989070935 - .6982266265924524000098548j,
-.7105305456418785989070935 + .6982266265924524000098548j,
-.6362427683267827226840153 - .8118875040246347267248508j,
-.6362427683267827226840153 + .8118875040246347267248508j,
-.5430983056306302779658129 - .9299947824439872998916657j,
-.5430983056306302779658129 + .9299947824439872998916657j,
-.4232528745642628461715044 - 1.055755605227545931204656j,
-.4232528745642628461715044 + 1.055755605227545931204656j,
-.2566376987939318038016012 - 1.197982433555213008346532j,
-.2566376987939318038016012 + 1.197982433555213008346532j]
elif N == 23:
p = [-.9066732476324988168207439,
-.9027564979912504609412993 - .1010534335314045013252480j,
-.9027564979912504609412993 + .1010534335314045013252480j,
-.8909283242471251458653994 - .2023024699381223418195228j,
-.8909283242471251458653994 + .2023024699381223418195228j,
-.8709469395587416239596874 - .3039581993950041588888925j,
-.8709469395587416239596874 + .3039581993950041588888925j,
-.8423805948021127057054288 - .4062657948237602726779246j,
-.8423805948021127057054288 + .4062657948237602726779246j,
-.8045561642053176205623187 - .5095305912227258268309528j,
-.8045561642053176205623187 + .5095305912227258268309528j,
-.7564660146829880581478138 - .6141594859476032127216463j,
-.7564660146829880581478138 + .6141594859476032127216463j,
-.6965966033912705387505040 - .7207341374753046970247055j,
-.6965966033912705387505040 + .7207341374753046970247055j,
-.6225903228771341778273152 - .8301558302812980678845563j,
-.6225903228771341778273152 + .8301558302812980678845563j,
-.5304922463810191698502226 - .9439760364018300083750242j,
-.5304922463810191698502226 + .9439760364018300083750242j,
-.4126986617510148836149955 - 1.065328794475513585531053j,
-.4126986617510148836149955 + 1.065328794475513585531053j,
-.2497697202208956030229911 - 1.202813187870697831365338j,
-.2497697202208956030229911 + 1.202813187870697831365338j]
elif N == 24:
p = [-.9055312363372773709269407 - 48440066540478700874836350.0e-27j,
-.9055312363372773709269407 + 48440066540478700874836350.0e-27j,
-.8983105104397872954053307 - .1454056133873610120105857j,
-.8983105104397872954053307 + .1454056133873610120105857j,
-.8837358034555706623131950 - .2426335234401383076544239j,
-.8837358034555706623131950 + .2426335234401383076544239j,
-.8615278304016353651120610 - .3403202112618624773397257j,
-.8615278304016353651120610 + .3403202112618624773397257j,
-.8312326466813240652679563 - .4386985933597305434577492j,
-.8312326466813240652679563 + .4386985933597305434577492j,
-.7921695462343492518845446 - .5380628490968016700338001j,
-.7921695462343492518845446 + .5380628490968016700338001j,
-.7433392285088529449175873 - .6388084216222567930378296j,
-.7433392285088529449175873 + .6388084216222567930378296j,
-.6832565803536521302816011 - .7415032695091650806797753j,
-.6832565803536521302816011 + .7415032695091650806797753j,
-.6096221567378335562589532 - .8470292433077202380020454j,
-.6096221567378335562589532 + .8470292433077202380020454j,
-.5185914574820317343536707 - .9569048385259054576937721j,
-.5185914574820317343536707 + .9569048385259054576937721j,
-.4027853855197518014786978 - 1.074195196518674765143729j,
-.4027853855197518014786978 + 1.074195196518674765143729j,
-.2433481337524869675825448 - 1.207298683731972524975429j,
-.2433481337524869675825448 + 1.207298683731972524975429j]
elif N == 25:
p = [-.9062073871811708652496104,
-.9028833390228020537142561 - 93077131185102967450643820.0e-27j,
-.9028833390228020537142561 + 93077131185102967450643820.0e-27j,
-.8928551459883548836774529 - .1863068969804300712287138j,
-.8928551459883548836774529 + .1863068969804300712287138j,
-.8759497989677857803656239 - .2798521321771408719327250j,
-.8759497989677857803656239 + .2798521321771408719327250j,
-.8518616886554019782346493 - .3738977875907595009446142j,
-.8518616886554019782346493 + .3738977875907595009446142j,
-.8201226043936880253962552 - .4686668574656966589020580j,
-.8201226043936880253962552 + .4686668574656966589020580j,
-.7800496278186497225905443 - .5644441210349710332887354j,
-.7800496278186497225905443 + .5644441210349710332887354j,
-.7306549271849967721596735 - .6616149647357748681460822j,
-.7306549271849967721596735 + .6616149647357748681460822j,
-.6704827128029559528610523 - .7607348858167839877987008j,
-.6704827128029559528610523 + .7607348858167839877987008j,
-.5972898661335557242320528 - .8626676330388028512598538j,
-.5972898661335557242320528 + .8626676330388028512598538j,
-.5073362861078468845461362 - .9689006305344868494672405j,
-.5073362861078468845461362 + .9689006305344868494672405j,
-.3934529878191079606023847 - 1.082433927173831581956863j,
-.3934529878191079606023847 + 1.082433927173831581956863j,
-.2373280669322028974199184 - 1.211476658382565356579418j,
-.2373280669322028974199184 + 1.211476658382565356579418j]
else:
raise ValueError("Bessel Filter not supported for order %s" % N)
return asarray(z), asarray(p), k
# Map user-facing filter-family names (including common aliases such as
# 'butterworth' and 'cauer') to a pair of
# [analog prototype function, order-selection function].
# 'bessel' lists only a prototype function -- it has no order-selection
# helper entry here.
filter_dict = {'butter': [buttap, buttord],
               'butterworth': [buttap, buttord],
               'cauer': [ellipap, ellipord],
               'elliptic': [ellipap, ellipord],
               'ellip': [ellipap, ellipord],
               'bessel': [besselap],
               'cheby1': [cheb1ap, cheb1ord],
               'chebyshev1': [cheb1ap, cheb1ord],
               'chebyshevi': [cheb1ap, cheb1ord],
               'cheby2': [cheb2ap, cheb2ord],
               'chebyshev2': [cheb2ap, cheb2ord],
               'chebyshevii': [cheb2ap, cheb2ord],
               }
# Normalise the many accepted spellings of a band type (e.g. 'bp', 'pass',
# 'l', 'hp') to one of the four canonical names used internally:
# 'bandpass', 'bandstop', 'lowpass' or 'highpass'.
band_dict = {'band': 'bandpass',
             'bandpass': 'bandpass',
             'pass': 'bandpass',
             'bp': 'bandpass',
             'bs': 'bandstop',
             'bandstop': 'bandstop',
             'bands': 'bandstop',
             'stop': 'bandstop',
             'l': 'lowpass',
             'low': 'lowpass',
             'lowpass': 'lowpass',
             'lp': 'lowpass',
             'high': 'highpass',
             'highpass': 'highpass',
             'h': 'highpass',
             'hp': 'highpass',
             }
| ortylp/scipy | scipy/signal/filter_design.py | Python | bsd-3-clause | 127,807 | [
"Gaussian"
] | aa1488e7279335b4e7373f2cf4184781e8e36da1c364375a1709982a81c9e3ad |
"""
Views for user API
"""
from django.shortcuts import redirect
from django.utils import dateparse
from rest_framework import generics, views
from rest_framework.decorators import api_view
from rest_framework.response import Response
from opaque_keys.edx.keys import UsageKey
from opaque_keys import InvalidKeyError
from courseware.access import is_mobile_available_for_user
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from courseware.views import get_current_child, save_positions_recursively_up
from student.models import CourseEnrollment, User
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .serializers import CourseEnrollmentSerializer, UserSerializer
from .. import errors
from ..utils import mobile_view, mobile_course_access
@mobile_view(is_user=True)
class UserDetail(generics.RetrieveAPIView):
    """
    **Use Case**

        Get information about the specified user and access other resources
        the user has permissions for.

        Users are redirected to this endpoint after they sign in.

        You can use the **course_enrollments** value in the response to get a
        list of courses the user is enrolled in.

    **Example Request**

        GET /api/mobile/v0.5/users/{username}

    **Response Values**

        If the request is successful, the request returns an HTTP 200 "OK" response.

        The HTTP 200 response has the following values.

        * course_enrollments: The URI to list the courses the currently signed
          in user is enrolled in.
        * email: The email address of the currently signed in user.
        * id: The ID of the user.
        * name: The full name of the currently signed in user.
        * username: The username of the currently signed in user.
    """
    # NOTE(review): Django's select_related() only follows forward
    # one-to-one/foreign-key relations; 'course_enrollments' looks like a
    # reverse relation from CourseEnrollment -- confirm it is not silently
    # ignored here (prefetch_related would be the usual tool for that).
    queryset = (
        User.objects.all()
        .select_related('profile', 'course_enrollments')
    )
    serializer_class = UserSerializer
    # Users are addressed by username in the URL, not by primary key.
    lookup_field = 'username'
@mobile_view(is_user=True)
class UserCourseStatus(views.APIView):
    """
    **Use Cases**

        Get or update the ID of the module that the specified user last
        visited in the specified course.

    **Example Requests**

        GET /api/mobile/v0.5/users/{username}/course_status_info/{course_id}

        PATCH /api/mobile/v0.5/users/{username}/course_status_info/{course_id}

    **PATCH Parameters**

        The body of the PATCH request can include the following parameters.

        * last_visited_module_id={module_id}
        * modification_date={date}

        The modification_date parameter is optional. If it is present, the
        update will only take effect if the modification_date in the
        request is later than the modification_date saved on the server.

    **Response Values**

        If the request is successful, the request returns an HTTP 200 "OK" response.

        The HTTP 200 response has the following values.

        * last_visited_module_id: The ID of the last module that the user
          visited in the course.
        * last_visited_module_path: The ID of the modules in the path from the
          last visited module to the course module.
    """
    # Only reads and partial updates are supported on this resource.
    http_method_names = ["get", "patch"]

    def _last_visited_module_path(self, request, course):
        """
        Returns the path from the last module visited by the current user in the given course up to
        the course module. If there is no such visit, the first item deep enough down the course
        tree is used.
        """
        # depth=2 caches course -> chapter -> section, which is exactly the
        # depth walked below.
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)
        course_module = get_module_for_descriptor(
            request.user, request, course, field_data_cache, course.id, course=course
        )
        # Build the path top-down (course, chapter, section), then reverse it
        # so the deepest (last visited) module comes first in the response.
        path = [course_module]
        chapter = get_current_child(course_module, min_depth=2)
        if chapter is not None:
            path.append(chapter)
            section = get_current_child(chapter, min_depth=1)
            if section is not None:
                path.append(section)
        path.reverse()
        return path

    def _get_course_info(self, request, course):
        """
        Returns the course status
        """
        path = self._last_visited_module_path(request, course)
        path_ids = [unicode(module.location) for module in path]
        # path_ids[0] is the deepest module in the path (see
        # _last_visited_module_path), i.e. the last visited one.
        return Response({
            "last_visited_module_id": path_ids[0],
            "last_visited_module_path": path_ids,
        })

    def _update_last_visited_module_id(self, request, course, module_key, modification_date):
        """
        Saves the module id if the found modification_date is less recent than the passed modification date
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)
        try:
            module_descriptor = modulestore().get_item(module_key)
        except ItemNotFoundError:
            # Unknown module id -> client error, not a server crash.
            return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
        module = get_module_for_descriptor(
            request.user, request, module_descriptor, field_data_cache, course.id, course=course
        )
        if modification_date:
            # Compare against the stored 'position' field's last-modified
            # timestamp; an older client-supplied date must not clobber a
            # newer server-side value.
            key = KeyValueStore.Key(
                scope=Scope.user_state,
                user_id=request.user.id,
                block_scope_id=course.location,
                field_name='position'
            )
            original_store_date = field_data_cache.last_modified(key)
            if original_store_date is not None and modification_date < original_store_date:
                # old modification date so skip update
                return self._get_course_info(request, course)
        save_positions_recursively_up(request.user, request, field_data_cache, module, course=course)
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def get(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Get the ID of the module that the specified user last visited in the specified course.
        """
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def patch(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Update the ID of the module that the specified user last visited in the specified course.
        """
        module_id = request.DATA.get("last_visited_module_id")
        modification_date_string = request.DATA.get("modification_date")
        modification_date = None
        if modification_date_string:
            modification_date = dateparse.parse_datetime(modification_date_string)
            # Only timezone-aware dates are acceptable; a naive datetime
            # cannot be compared safely with the stored timestamp.
            if not modification_date or not modification_date.tzinfo:
                return Response(errors.ERROR_INVALID_MODIFICATION_DATE, status=400)
        if module_id:
            try:
                module_key = UsageKey.from_string(module_id)
            except InvalidKeyError:
                return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
            return self._update_last_visited_module_id(request, course, module_key, modification_date)
        else:
            # The arguments are optional, so if there's no argument just succeed
            return self._get_course_info(request, course)
@mobile_view(is_user=True)
class UserCourseEnrollmentsList(generics.ListAPIView):
    """
    **Use Case**

        Get information about the courses that the currently signed in user is
        enrolled in.

    **Example Request**

        GET /api/mobile/v0.5/users/{username}/course_enrollments/

    **Response Values**

        If the request for information about the user is successful, the
        request returns an HTTP 200 "OK" response.

        The HTTP 200 response has the following values.

        * certificate: Information about the user's earned certificate in the
          course.
        * course: A collection of the following data about the course.

          * course_handouts: The URI to get data for course handouts.
          * course_image: The path to the course image.
          * course_updates: The URI to get data for course updates.
          * end: The end date of the course.
          * id: The unique ID of the course.
          * latest_updates: Reserved for future use.
          * name: The name of the course.
          * number: The course number.
          * org: The organization that created the course.
          * start: The date and time when the course starts.
          * subscription_id: A unique "clean" (alphanumeric with '_') ID of
            the course.
          * video_outline: The URI to get the list of all videos that the user
            can access in the course.

        * created: The date the course was created.
        * is_active: Whether the course is currently active. Possible values
          are true or false.
        * mode: The type of certificate registration for this course (honor or
          certified).
        * url: URL to the downloadable version of the certificate, if exists.
    """
    queryset = CourseEnrollment.objects.all()
    serializer_class = CourseEnrollmentSerializer
    lookup_field = 'username'

    def get_queryset(self):
        """
        Return the named user's active enrollments, newest first, keeping
        only courses that still exist and are available on mobile.
        """
        username = self.kwargs['username']
        ordered = (
            self.queryset
            .filter(user__username=username, is_active=True)
            .order_by('created')
            .reverse()
        )
        visible = []
        for enrollment in ordered:
            overview = enrollment.course_overview
            # Drop enrollments whose course no longer has an overview, and
            # those the requesting user may not see on mobile.
            if overview and is_mobile_available_for_user(self.request.user, overview):
                visible.append(enrollment)
        return visible
@api_view(["GET"])
@mobile_view()
def my_user_info(request):
    """
    Redirect to the currently-logged-in user's info page
    """
    # Resolve the detail URL for whoever made the request.
    current_username = request.user.username
    return redirect("user-detail", username=current_username)
| xuxiao19910803/edx | lms/djangoapps/mobile_api/users/views.py | Python | agpl-3.0 | 10,046 | [
"VisIt"
] | cc97fc276dba900df36eaeadcb6fa23b6df517fb09f2ed6f1d5cc53042e5f2f0 |
import json
import nose
import datetime
import pylons
import sqlalchemy.orm as orm
import ckan.plugins as p
import ckan.lib.create_test_data as ctd
import ckan.model as model
import ckan.tests.legacy as tests
import ckan.tests.helpers as helpers
import ckanext.datastore.db as db
from ckanext.datastore.tests.helpers import rebuild_all_dbs, set_url_type
assert_equal = nose.tools.assert_equal
class TestDatastoreUpsert(tests.WsgiAppCase):
    # Exercises /api/action/datastore_ts_upsert with method='upsert' against
    # a datastore table whose primary key is the u'b\xfck' column.
    sysadmin_user = None
    normal_user = None

    @classmethod
    def setup_class(cls):
        """Create the test dataset and a keyed timeseries datastore resource."""
        if not tests.is_datastore_supported():
            raise nose.SkipTest("Datastore not supported")
        p.load('timeseries')
        helpers.reset_db()
        ctd.CreateTestData.create()
        cls.sysadmin_user = model.User.get('testsysadmin')
        cls.normal_user = model.User.get('annafan')
        set_url_type(
            model.Package.get('annakarenina').resources, cls.sysadmin_user)
        resource = model.Package.get('annakarenina').resources[0]
        # Two seed rows; u'b\xfck' is the primary key used by all upserts.
        cls.data = {
            'resource_id': resource.id,
            'fields': [{'id': u'b\xfck', 'type': 'text'},
                       {'id': 'author', 'type': 'text'},
                       {'id': 'nested', 'type': 'json'},
                       {'id': 'characters', 'type': 'text[]'},
                       {'id': 'published'}],
            'primary_key': u'b\xfck',
            'records': [{u'b\xfck': 'annakarenina', 'author': 'tolstoy',
                         'published': '2005-03-01', 'nested': ['b', {'moo': 'moo'}]},
                        {u'b\xfck': 'warandpeace', 'author': 'tolstoy',
                         'nested': {'a':'b'}}
                        ]
        }
        postparams = '%s=1' % json.dumps(cls.data)
        auth = {'Authorization': str(cls.sysadmin_user.apikey)}
        res = cls.app.post('/api/action/datastore_ts_create', params=postparams,
                           extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        # Direct DB session used to verify table contents after API calls.
        engine = db._get_engine(
            {'connection_url': pylons.config['ckan.datastore.write_url']})
        cls.Session = orm.scoped_session(orm.sessionmaker(bind=engine))

    @classmethod
    def teardown_class(cls):
        rebuild_all_dbs(cls.Session)
        p.unload('timeseries')

    def test_upsert_requires_auth(self):
        """An unauthenticated upsert must be rejected with 403."""
        data = {
            'resource_id': self.data['resource_id']
        }
        postparams = '%s=1' % json.dumps(data)
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            status=403)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False

    def test_upsert_empty_fails(self):
        """An empty payload is a validation error (409)."""
        postparams = '%s=1' % json.dumps({})
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False

    def test_upsert_basic(self):
        """Insert a new row via upsert, then update/clear single fields."""
        c = self.Session.connection()
        results = c.execute('select 1 from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 2
        self.Session.remove()

        hhguide = u"hitchhiker's guide to the galaxy"
        # NOTE(review): 'nested' appears twice in this literal; Python keeps
        # only the second value ({'foo': 'bar'}), which is what is asserted
        # below.
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{
                'author': 'adams',
                'nested': {'a': 2, 'b': {'c': 'd'}},
                'characters': ['Arthur Dent', 'Marvin'],
                'nested': {'foo': 'bar'},
                u'b\xfck': hhguide}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True

        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 3
        records = results.fetchall()
        assert records[2][u'b\xfck'] == hhguide
        assert records[2].author == 'adams'
        assert records[2].characters == ['Arthur Dent', 'Marvin']
        assert json.loads(records[2].nested.json) == {'foo': 'bar'}
        self.Session.remove()

        c = self.Session.connection()
        results = c.execute("select * from \"{0}\" where author='{1}'".format(self.data['resource_id'], 'adams'))
        assert results.rowcount == 1
        self.Session.remove()

        # upsert only the publish date
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{'published': '1979-1-1', u'b\xfck': hhguide}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True

        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 3
        records = results.fetchall()
        assert records[2][u'b\xfck'] == hhguide
        assert records[2].author == 'adams'
        assert records[2].published == datetime.datetime(1979, 1, 1)
        self.Session.remove()

        # delete publish date
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{u'b\xfck': hhguide, 'published': None}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True

        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 3
        records = results.fetchall()
        assert records[2][u'b\xfck'] == hhguide
        assert records[2].author == 'adams'
        assert records[2].published == None
        self.Session.remove()

        # an unseen primary key inserts a brand-new row
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{'author': 'tolkien', u'b\xfck': 'the hobbit'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True

        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 4
        records = results.fetchall()
        assert records[3][u'b\xfck'] == 'the hobbit'
        assert records[3].author == 'tolkien'
        self.Session.remove()

        # test % in records
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{'author': 'tol % kien', u'b\xfck': 'the % hobbit'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True

    def test_upsert_missing_key(self):
        """A record that omits the primary key column is rejected (409)."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{'author': 'tolkien'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False

    def test_upsert_non_existing_field(self):
        """A record referencing an undeclared column is rejected (409)."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{u'b\xfck': 'annakarenina', 'dummy': 'tolkien'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False

    def test_upsert_works_with_empty_list_in_json_field(self):
        """An empty list must round-trip through the 'nested' json column."""
        hhguide = u"hitchhiker's guide to the galaxy"
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{
                'nested': [],
                u'b\xfck': hhguide}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True, res_dict

        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(data['resource_id']))
        record = [r for r in results.fetchall() if r[3] == hhguide]  # r[2] is autogen_timestamp
        self.Session.remove()
        assert len(record) == 1, record
        assert_equal(json.loads(record[0][5].json),
                     data['records'][0]['nested'])
class TestDatastoreInsert(tests.WsgiAppCase):
    # Exercises /api/action/datastore_ts_upsert with method='insert'
    # (strict insert: conflicts with the primary key are errors).
    sysadmin_user = None
    normal_user = None

    @classmethod
    def setup_class(cls):
        """Create the test dataset and a keyed timeseries datastore resource."""
        if not tests.is_datastore_supported():
            raise nose.SkipTest("Datastore not supported")
        p.load('timeseries')
        ctd.CreateTestData.create()
        cls.sysadmin_user = model.User.get('testsysadmin')
        cls.normal_user = model.User.get('annafan')
        set_url_type(
            model.Package.get('annakarenina').resources, cls.sysadmin_user)
        resource = model.Package.get('annakarenina').resources[0]
        cls.data = {
            'resource_id': resource.id,
            'fields': [{'id': u'b\xfck', 'type': 'text'},
                       {'id': 'author', 'type': 'text'},
                       {'id': 'nested', 'type': 'json'},
                       {'id': 'characters', 'type': 'text[]'},
                       {'id': 'published'}],
            'primary_key': u'b\xfck',
            'records': [{u'b\xfck': 'annakarenina', 'author': 'tolstoy',
                         'published': '2005-03-01', 'nested': ['b', {'moo': 'moo'}]},
                        {u'b\xfck': 'warandpeace', 'author': 'tolstoy',
                         'nested': {'a':'b'}}
                        ]
        }
        postparams = '%s=1' % json.dumps(cls.data)
        auth = {'Authorization': str(cls.sysadmin_user.apikey)}
        res = cls.app.post('/api/action/datastore_ts_create', params=postparams,
                           extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        # Direct DB session used to verify table contents after API calls.
        engine = db._get_engine(
            {'connection_url': pylons.config['ckan.datastore.write_url']})
        cls.Session = orm.scoped_session(orm.sessionmaker(bind=engine))

    @classmethod
    def teardown_class(cls):
        p.unload('timeseries')
        rebuild_all_dbs(cls.Session)

    def test_insert_non_existing_field(self):
        """Inserting a record with an undeclared column is rejected (409)."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'insert',
            'records': [{u'b\xfck': 'the hobbit', 'dummy': 'tolkien'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False

    def test_insert_with_index_violation(self):
        """Reusing an existing primary key with method='insert' fails (409)."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'insert',
            'records': [{u'b\xfck': 'annakarenina'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False

    def test_insert_basic(self):
        """A fresh primary key is inserted as a third row."""
        hhguide = u"hitchhiker's guide to the galaxy"
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'insert',
            'records': [{
                'author': 'adams',
                'characters': ['Arthur Dent', 'Marvin'],
                'nested': {'foo': 'bar', 'baz': 3},
                u'b\xfck': hhguide}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True

        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        self.Session.remove()
        assert results.rowcount == 3
class TestDatastoreUpdate(tests.WsgiAppCase):
    # Exercises /api/action/datastore_ts_upsert with method='update'
    # (existing rows only: unknown or missing primary keys are errors).
    sysadmin_user = None
    normal_user = None

    @classmethod
    def setup_class(cls):
        """Create the test dataset and a three-row keyed datastore resource."""
        if not tests.is_datastore_supported():
            raise nose.SkipTest("Datastore not supported")
        p.load('timeseries')
        ctd.CreateTestData.create()
        cls.sysadmin_user = model.User.get('testsysadmin')
        cls.normal_user = model.User.get('annafan')
        set_url_type(
            model.Package.get('annakarenina').resources, cls.sysadmin_user)
        resource = model.Package.get('annakarenina').resources[0]
        hhguide = u"hitchhiker's guide to the galaxy"
        cls.data = {
            'resource_id': resource.id,
            'fields': [{'id': u'b\xfck', 'type': 'text'},
                       {'id': 'author', 'type': 'text'},
                       {'id': 'nested', 'type': 'json'},
                       {'id': 'characters', 'type': 'text[]'},
                       {'id': 'published'}],
            'primary_key': u'b\xfck',
            'records': [{u'b\xfck': 'annakarenina', 'author': 'tolstoy',
                         'published': '2005-03-01', 'nested': ['b', {'moo': 'moo'}]},
                        {u'b\xfck': 'warandpeace', 'author': 'tolstoy',
                         'nested': {'a':'b'}},
                        {'author': 'adams',
                         'characters': ['Arthur Dent', 'Marvin'],
                         'nested': {'foo': 'bar'},
                         u'b\xfck': hhguide}
                        ]
        }
        postparams = '%s=1' % json.dumps(cls.data)
        auth = {'Authorization': str(cls.sysadmin_user.apikey)}
        res = cls.app.post('/api/action/datastore_ts_create', params=postparams,
                           extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        # Direct DB session used to verify table contents after API calls.
        engine = db._get_engine(
            {'connection_url': pylons.config['ckan.datastore.write_url']})
        cls.Session = orm.scoped_session(orm.sessionmaker(bind=engine))

    @classmethod
    def teardown_class(cls):
        p.unload('timeseries')
        rebuild_all_dbs(cls.Session)

    def test_update_basic(self):
        """Update individual fields of an existing row without adding rows."""
        c = self.Session.connection()
        results = c.execute('select 1 from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 3, results.rowcount
        self.Session.remove()

        hhguide = u"hitchhiker's guide to the galaxy"
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'update',
            'records': [{
                'author': 'adams',
                'characters': ['Arthur Dent', 'Marvin'],
                'nested': {'baz': 3},
                u'b\xfck': hhguide}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True

        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 3
        records = results.fetchall()
        assert records[2][u'b\xfck'] == hhguide
        assert records[2].author == 'adams'
        self.Session.remove()

        c = self.Session.connection()
        results = c.execute("select * from \"{0}\" where author='{1}'".format(self.data['resource_id'], 'adams'))
        assert results.rowcount == 1
        self.Session.remove()

        # update only the publish date
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'update',
            'records': [{'published': '1979-1-1', u'b\xfck': hhguide}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True

        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        self.Session.remove()
        assert results.rowcount == 3
        records = results.fetchall()
        assert records[2][u'b\xfck'] == hhguide
        assert records[2].author == 'adams'
        assert records[2].published == datetime.datetime(1979, 1, 1)

        # delete publish date
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'update',
            'records': [{u'b\xfck': hhguide, 'published': None}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True

        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        self.Session.remove()
        assert results.rowcount == 3
        records = results.fetchall()
        assert records[2][u'b\xfck'] == hhguide
        assert records[2].author == 'adams'
        assert records[2].published == None

    def test_update_missing_key(self):
        """A record that omits the primary key column is rejected (409)."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'update',
            'records': [{'author': 'tolkien'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False

    def test_update_non_existing_key(self):
        """Updating a primary key that has no row is rejected (409)."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'update',
            'records': [{u'b\xfck': '', 'author': 'tolkien'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False

    def test_update_non_existing_field(self):
        """Updating an undeclared column is rejected (409)."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'update',
            'records': [{u'b\xfck': 'annakarenina', 'dummy': 'tolkien'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False
| namgk/ckan-timeseries | ckanext/timeseries/tests/test_upsert.py | Python | agpl-3.0 | 21,184 | [
"Galaxy"
] | 59ec6bbd33dc17dd057a498177177dd887b3ff04e3721d43e8d66e6c376b8094 |
#
# co_co_sum_has_correct_parameter.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from pynestml.cocos.co_co import CoCo
from pynestml.symbols.predefined_functions import PredefinedFunctions
from pynestml.utils.logger import Logger, LoggingLevel
from pynestml.utils.messages import Messages
from pynestml.visitors.ast_visitor import ASTVisitor
from pynestml.meta_model.ast_neuron import ASTNeuron
from pynestml.meta_model.ast_function_call import ASTFunctionCall
from pynestml.meta_model.ast_simple_expression import ASTSimpleExpression
class CoCoSumHasCorrectParameter(CoCo):
    """
    This coco ensures that cur_sum, cond_sum and convolve receive only simple
    variable references as inputs.

    Not allowed:
        V mV = convolve(g_in+g_ex,Buffer)
    """
    name = 'convolve is correctly built'
    description = 'TODO'

    def __init__(self):
        self.neuronName = None

    def check_co_co(self, neuron):
        """
        Ensures the coco for the handed over neuron.
        :param neuron: a single neuron instance.
        :type neuron: ASTNeuron
        """
        self.neuronName = neuron.get_name()
        neuron.accept(SumIsCorrectVisitor())
class SumIsCorrectVisitor(ASTVisitor):
    """
    Visitor ensuring that every cur_sum/cond_sum/convolve call receives plain
    variable arguments.
    """

    def visit_function_call(self, node):
        """
        Checks the coco on the current function call.
        :param node: a single function call.
        :type node: ASTFunctionCall
        """
        checked_functions = (PredefinedFunctions.CURR_SUM,
                             PredefinedFunctions.COND_SUM,
                             PredefinedFunctions.CONVOLVE)
        if node.get_name() not in checked_functions:
            return
        for argument in node.get_args():
            # Anything other than a bare variable reference is reported.
            if isinstance(argument, ASTSimpleExpression) and argument.is_variable():
                continue
            code, message = Messages.get_not_a_variable(str(argument))
            Logger.log_message(code=code, message=message,
                               error_position=argument.get_source_position(),
                               log_level=LoggingLevel.ERROR)
| kperun/nestml | pynestml/cocos/co_co_sum_has_correct_parameter.py | Python | gpl-2.0 | 2,733 | [
"NEURON"
] | efce0faed4751887d3be4a8606382baf1c15775d674b89ee868dd89f4be52cd0 |
#!/usr/bin/python
#########################################################################
# Bond Length Calculator #
# Jeff Doak #
# v 1.2 11/29/2011 #
# #
#This program calculates the bond lengths between all the atoms in a #
#unit cell, read from a POSCAR or CONTCAR like file. Bond lengths are #
#calculated between an atom in the unit cell and all other atoms in the #
#same unit cell, and those in the 26 surrounding unit cells, to get all #
#possible nearest neighbor bond lengths. #
# #
#The Bond Length Calculator can output results in several ways. A large #
#table containing all bond lengths (27 per pair of atoms) is written to #
#a file 'bigbond.out'. In addition, Bond Length Calculator can search #
#for a given number of closest bonds between one type of atom and atoms #
#of other, given types. Average bond lengths and standard deviations are#
#output for this calculation. #
#########################################################################
import sys
import numpy as np
from scipy.stats import gaussian_kde
from scipy.stats import histogram
def usage():
    """Print the command-line help text for Bond Length Calculator."""
    usage = """ --Bond Length Calculator--
Jeff Doak v1.1 10-18-2010 jeff.w.doak@gmail.com
Bond Length Calculator can be used to find distances between atoms in a crystal
based on specific criteria.
Currently, Bond Length Calculator can find nearest neighbor distances between
atoms of two sets of elements. The inputs to this program are the number of nearest
neighbors, and two lists of elements (by number according to the input file)."""
    # Parenthesized single-argument print behaves identically under Python 2
    # and Python 3; the previous bare `print usage` statement was
    # Python-2-only syntax.
    print(usage)
#def get_arguments(argv):
def input_vasp_file(file="POSCAR"):
    """Parse a POSCAR/CONTCAR-style file and store the structure in module
    globals: name, lat (3x3 Cartesian lattice), num_atom_types,
    atom_type_list, num_atoms and positions (Cartesian, scaled)."""
    global name, lat, num_atom_types, atom_type_list, num_atoms, positions
    handle = open(file, 'r')
    # Structure name: every token of the first line, space-joined (with a
    # trailing space, matching the historical behaviour).
    name = ''
    for token in handle.readline().split():
        name = name + token + ' '
    # Overall scale factor applied to both lattice and positions at the end.
    scale = float(handle.readline().split()[0])
    # Unit-cell lattice vectors, one per row.
    lat = np.zeros((3, 3))
    for row in range(3):
        entries = handle.readline().split()
        for col in range(3):
            lat[row, col] = float(entries[col])
    # Per-type atom counts (old VASP-4 style: no element-symbol line).
    counts = handle.readline().split()
    num_atom_types = len(counts)
    atom_type_list = [int(c) for c in counts]
    num_atoms = 0
    for c in atom_type_list:
        num_atoms += c
    # Coordinate convention line ("Direct"/"Cartesian").
    convention = handle.readline().split()[0]
    positions = np.zeros((num_atoms, 3))
    for row in range(num_atoms):
        entries = handle.readline().split()
        for col in range(3):
            positions[row, col] = float(entries[col])
    # Convert fractional (direct) coordinates to Cartesian.
    if convention[0] not in ('c', 'C'):
        for row in range(num_atoms):
            positions[row] = np.dot(lat.transpose(), positions[row])
    # Apply the scale factor to lattice vectors and positions.
    lat = lat * scale
    positions = positions * scale
    handle.close()
def write_table_to_file(table, name, file_name="bigbond.out"):
    """Write a 2-D table to a text file, one row per line with a trailing
    space after each entry, preceded by the structure name."""
    out = open(file_name, 'w')
    out.write(name + "\n")
    for row in range(len(table)):
        cells = [str(table[row, col]) + ' ' for col in range(len(table[row]))]
        out.write(''.join(cells) + "\n")
    out.close()
def bond_vector_table(positions):
    """Return the n x n x 3 antisymmetric table of pairwise bond vectors,
    table[i, j] = positions[i] - positions[j], so table[i, j] == -table[j, i]."""
    natoms = len(positions)
    table = np.zeros((natoms, natoms, 3))
    pos = np.asarray(positions)
    # Broadcasting computes every pairwise difference in one shot.
    table[:] = pos[:, np.newaxis, :] - pos[np.newaxis, :, :]
    return table
def all_neighbor_distances(lat, vector_table, cell_rad=1):
    """
    Return an n x n*(1+2*cell_rad)**3 table of bond lengths between every
    atom in the unit cell and every atom in the same cell plus the
    surrounding cell_rad layers of periodic images. Column j*ncells+n holds
    the distance from atom i to the n-th image of atom j, where images are
    enumerated with the third lattice index varying fastest.
    """
    natoms = len(vector_table)
    ncells = (1 + 2 * cell_rad) ** 3
    table = np.zeros((natoms, natoms * ncells))
    # Precompute the image shift vectors once, in the same order as the
    # original triple loop (h outermost, l innermost).
    shifts = []
    for h in range(-cell_rad, cell_rad + 1):
        for k in range(-cell_rad, cell_rad + 1):
            for l in range(-cell_rad, cell_rad + 1):
                shifts.append(h * lat[0] + k * lat[1] + l * lat[2])
    for i in range(natoms):
        for j in range(natoms):
            for n, shift in enumerate(shifts):
                table[i, j * ncells + n] = np.linalg.norm(vector_table[i, j] + shift)
    return table
def list_atoms_of_type(subset, num_atom_types, atom_type_list):
    """Return the 0-based indices of all atoms whose type index appears in
    the list `subset`; atoms are numbered type-by-type in file order."""
    # Expand the per-type counts into one type index per atom, then pick the
    # atoms whose type is requested.
    types_per_atom = []
    for type_idx in range(num_atom_types):
        types_per_atom.extend([type_idx] * atom_type_list[type_idx])
    return [i for i, t in enumerate(types_per_atom) if t in subset]
def find_nearest_neighbors(big_table,a_type_list,b_type_list,num_nn,num_atom_types,atom_type_list):
    """Search through the table of all neighbor atom distances to find the
    num_nn nearest neighbors between atoms of types a_type and atoms of types
    b_type.

    Returns (bond_table, bond_stats): bond_table[i] holds the num_nn shortest
    distances from the i-th a-type atom to any b-type atom over all periodic
    images, and bond_stats is [mean, standard deviation] of bond_table.
    """
    a_atom_list = list_atoms_of_type(a_type_list,num_atom_types,atom_type_list)
    b_atom_list = list_atoms_of_type(b_type_list,num_atom_types,atom_type_list)
    # Columns per unit-cell atom in big_table, i.e. the number of periodic
    # images (27 for cell_rad=1 in all_neighbor_distances).
    # NOTE(review): relies on Python 2 integer division; under Python 3 this
    # becomes a float and the index arithmetic below fails -- use // there.
    bonds_per_pair = len(big_table[0])/len(big_table)
    #Make list of all bonds containing atoms in b_atom_list
    full_b_list = []
    for i in b_atom_list:
        for j in range(bonds_per_pair):
            full_b_list.append(i*bonds_per_pair+j)
    #For each atom in a_atom_list, find the num_nn shortest bonds to atoms
    #in b_atom_list (full_b_list)
    bond_table = np.zeros((len(a_atom_list),num_nn))
    for i in range(len(a_atom_list)):
        for j in range(len(full_b_list)):
            #Ignore same atom distances (i.e. a length of 0)
            if big_table[a_atom_list[i],full_b_list[j]] != 0.0:
                # Fill the first num_nn slots directly, then repeatedly
                # replace the current maximum whenever a shorter bond shows
                # up (a fixed-size "keep the smallest num_nn" buffer).
                # NOTE(review): a slot skipped by the zero-distance test
                # above stays 0.0 and is never replaced -- confirm that a
                # zero entry cannot occur among the first num_nn b-columns.
                if j < num_nn:
                    bond_table[i,j] = big_table[a_atom_list[i],full_b_list[j]]
                elif big_table[a_atom_list[i],full_b_list[j]] < bond_table[i].max():
                    bond_table[i,bond_table[i].argmax()] = big_table[a_atom_list[i],full_b_list[j]]
    bond_stats = [bond_table.mean(),bond_table.std()]
    return bond_table,bond_stats
    #return bond_stats
def NN_config(big_table,a_list,b_list,num_nn):
    """
    Sorts the table of all bond lengths to find the num_nn closest bond lengths.

    Returns (bond_table, config_table): for each a-type atom, the num_nn
    shortest neighbor distances and the type indices (shifted by -1) of the
    corresponding neighbor atoms.
    """
    # Uses module globals atom_type_list and num_atom_types set by
    # input_vasp_file().
    # Per-atom type index, expanded from the per-type atom counts.
    atom_types = [ i for i in range(len(atom_type_list)) for j in range(atom_type_list[i]) ]
    a_atom_list = list_atoms_of_type(a_list,num_atom_types,atom_type_list)
    bond_table = []
    config_table = []
    for i in a_atom_list:
        # Entry 0 of the sorted row is the zero self-distance; take the next
        # num_nn entries as the nearest neighbors.
        pos = np.sort(big_table[i])[1:num_nn+1]
        atoms = np.argsort(big_table[i])[1:num_nn+1]
        # Map flattened supercell column indices back to unit-cell atoms.
        atoms = [ orig_atom(j,big_table) for j in atoms ]
        types = [ atom_types[k]-1 for k in atoms ]
        # NOTE(review): num_b is computed but not used below -- confirm it is
        # intentional (the count is recomputed in config_stats).
        num_b = types.count(b_list[-1])
        bond_table.append(pos)
        config_table.append(types)
        print i,pos,atoms,types,types.count(1)
    print bond_table,config_table
    return np.array(bond_table),np.array(config_table)
def orig_atom(atom, big_table):
    """
    Return the 0-indexed position, in the original unit cell, of the atom
    referred to by a column index of the flattened supercell neighbor table.

    big_table has n rows and n*images columns, where column j*images+n
    belongs to unit-cell atom j (see all_neighbor_distances), so the
    originating atom is the integer quotient of the column index by the
    number of periodic images per atom.
    """
    # Floor division keeps the result an int under both Python 2 and 3.
    # The previous implementation looped with `while atom > bonds_per_pair`
    # ('>' instead of '>='), so any column index that was an exact multiple
    # of the image count was attributed to the preceding atom.
    bonds_per_pair = len(big_table[0]) // len(big_table)
    return atom // bonds_per_pair
def config_stats(bond_table,config_table,a_list,b_list,num_nn,elements):
    """
    Determine the configurations of b-type atoms around each a-type atom, and
    average the bond lengths for (i) each a-type atom and (ii) each a-type atom
    with the same NN-config.

    Returns config_list, where config_list[m] gathers, for every a-type atom
    with exactly m B-type neighbors, its non-B bond lengths ([0]) and its
    B bond lengths ([1]). Uses module globals num_atom_types/atom_type_list.
    """
    a_atom_list = list_atoms_of_type(a_list,num_atom_types,atom_type_list)
    b_atom_list = list_atoms_of_type(b_list,num_atom_types,atom_type_list)
    n_b_types = len(b_list)
    # One bucket per possible number of B-type nearest neighbors (0..num_nn).
    config_list = [ [[],[]] for i in range(num_nn+1) ]
    # First pass: per-atom statistics, printed atom by atom.
    for i in range(len(bond_table)):
        num_b = 0
        bond_a = []
        bond_b = []
        for j in range(num_nn):
            # A config_table entry of 1 marks a B-type neighbor.
            if config_table[i,j] == 1:
                num_b += 1
                bond_b.append(bond_table[i,j])
            else:
                bond_a.append(bond_table[i,j])
        avg_a = np.mean(bond_a)
        std_a = np.std(bond_a)
        avg_b = np.mean(bond_b)
        std_b = np.std(bond_b)
        print "A-atom",i,"# NN-B",num_b
        print bond_table[i],config_table[i]
        print " avg b_A",avg_a,"Ang, std b_A",std_a,"Ang"
        print " avg b_B",avg_b,"Ang, std b_B",std_b,"Ang"
        print
        config_list[num_b][0].append(bond_a)
        config_list[num_b][1].append(bond_b)
    # Second pass: aggregate over all a-type atoms sharing the same number of
    # B neighbors and build LaTeX table rows (one '&'-separated cell each).
    numstr = ""
    astr = ""
    bstr = ""
    for i in range(len(config_list)):
        num_a = len(config_list[i][0])
        num_b = len(config_list[i][1])
        avg_a = np.mean(config_list[i][0])
        std_a = np.std(config_list[i][0])
        avg_b = np.mean(config_list[i][1])
        std_b = np.std(config_list[i][1])
        numstr += "& "+str(num_b)+" "
        astr += "& $%.3f \pm %.3f$ " % (avg_a,std_a)
        bstr += "& $%.3f \pm %.3f$ " % (avg_b,std_b)
        print "# X atoms with",i,"B NN atoms:",num_b
        #print "avg b_A",avg_a,"std b_A",std_a
        #print "avg b_B",avg_b,"std b_B",std_b
        print "$%.3f \pm %.3f$" % (avg_a,std_a)
        print "$%.3f \pm %.3f$" % (avg_b,std_b)
        print
    # Close each LaTeX row.
    numstr += "\\\\"
    astr += "\\\\"
    bstr += "\\\\"
    print numstr
    print astr
    print bstr
    return config_list
def main(args):
if args[0]:
filename = args[0]
else:
filename = "CONTCAR"
input_vasp_file(filename)
vector_table = bond_vector_table(positions)
all_neighbor_table = all_neighbor_distances(lat,vector_table)
#write_table_to_file(all_neighbor_table,name)
#for GePbTe sqs half, Ge-0,Pb-1,Te-2
#for PbS-PbTe sqs half Pb-0 S-1 Te-2
#Edit these numbers!!!!!!!!!!
a_list = [[0]]
b_list = [1,2]
num_nn = [6] # Number of nearest neighbors between atoms of type a and b
bond_stats=[]
bond_table = []
for i in range(len(a_list)):
table,stats = find_nearest_neighbors(all_neighbor_table,a_list[i],b_list,num_nn[i],num_atom_types,atom_type_list)
bond_table.append(table)
bond_stats.append(stats)
bond_table = np.array(bond_table).flatten()
print "Table of bond lengths"
print np.sort(bond_table)
print "Avg. bond length (Ang), Std. Dev. (Ang)"
print bond_stats
print
gauss = gaussian_kde(bond_table)
#xdata = np.linspace(2.4,4.0,100)
xdata = np.linspace(min(bond_table)-3.*bond_stats[0][1],max(bond_table)+3.*bond_stats[0][1],100)
ydata = gauss(xdata)
print "Gaussian distribution fit"
for i in range(len(xdata)):
print xdata[i],ydata[i]
print
nbins = 10
hist,lowest,binsize,extra = histogram(bond_table,numbins=nbins)
n = lowest
print "histogram data"
print n,"0.0"
for i in range(len(hist)):
print n,hist[i]
n += binsize
print n,hist[i]
print n,"0.0"
print
#Runs the main method if Bond Length Calculator is called from the command line.
if __name__=="__main__":
    # With zero or one argument, run the default nearest-neighbor analysis.
    if len(sys.argv[1:]) <= 1:
        main(sys.argv[1:])
    else:
        # With more arguments: first is the input file, the rest are element
        # labels used only for reporting in config_stats.
        # NOTE(review): this rebinds the module-level global `name` (also set
        # by input_vasp_file) -- confirm that is intentional.
        name = str(sys.argv[1])
        elements = [ str(i) for i in sys.argv[2:] ]
        input_vasp_file(name)
        vector_table = bond_vector_table(positions)
        all_neighbor_table = all_neighbor_distances(lat,vector_table)
        # Hard-coded type selections: a-type 0 vs b-types 1 and 2, 6 NN.
        a_list = [0]
        b_list = [1,2]
        num_nn = 6
        bond_table,config_table = NN_config(all_neighbor_table,a_list,b_list,num_nn)
        config_list = config_stats(bond_table,config_table,a_list,b_list,num_nn,elements)
| jeffwdoak/vasp_scripts | vasp_scripts/bondlengths.py | Python | mit | 13,023 | [
"CRYSTAL",
"Gaussian"
] | d38c063f04d2a6db48b0a2c90e33f5a863a0f5ce9b5da62b23540e46c5609c89 |
import numpy as np
import math
from pylab import *
from palettable.wesanderson import Zissou_5 as wsZ
import matplotlib.ticker as mtick
from scipy.interpolate import interp1d
from scipy.interpolate import griddata
import scipy.ndimage as ndimage
#read JP and TH files
#def read_JP_files(fname):
# da = np.genfromtxt(fname, delimiter=" ", comments='#')
# return da[:,0], da[:,1], da[:,2], da[:,3],da[:,4],da[:,5]
#Read JN files
def read_JN_files(fname):
    """Load a 9-column comma-separated JN output file and return the nine
    columns as separate 1-D arrays."""
    table = np.genfromtxt(fname, delimiter=",")
    return tuple(table[:, col] for col in range(9))
## Plot
# Global figure layout: 5 rows (spot colatitudes) x 2 columns (one spot /
# two antipodal spots) on a 500x100 GridSpec, with 70-row panels separated
# by 30-row gaps.
fig = figure(figsize=(9,10), dpi=80)
rc('font', family='serif')
rc('xtick', labelsize='xx-small')
rc('ytick', labelsize='xx-small')

gs = GridSpec(500, 100)
gs.update(wspace = 0.34)
#gs.update(hspace = 0.4)

lsize = 9.0

#phase limits
xmin = 0.0
xmax = 1.0

#figure shape parameters
panelh = 70
skiph = 30
mfiglim = 0

#path to files
path_files = "../../out_skymaps/"

#labels size
tsize = 8.0

#general parameters
# File-name fragments selecting the model files to read.
# NOTE(review): meanings presumably frequency/beaming/radius/mass/density
# from the f600/pbb/r15/m1.6/x10 naming -- confirm against out_skymaps.
nu = 'f600'
bprof = 'pbb'
rad = 'r15'
mass = 'm1.6'
rho = 'x10'

# Inclination tags and their numeric values (degrees) for the y-axis.
incls = ['i5','i10','i20','i30','i40','i50','i60','i70','i80','i90']
incls_g = [5,10,20,30,40,50,60,70,80,90]

#fig.text(0.5, 0.92, 'obl+nodel', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.72, 'obl+nodel $\\times$ $\\delta$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.52, 'obl $\\times$ $\\gamma$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.32, 'sphere $\\times$ $\\gamma$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.12, 'Phase',ha='center', va='center', size=lsize)

#pre-read one file to get initial values
colat = 'd10'
incl = incls[0]
fname = path_files + nu+bprof+rad+mass+colat+incl+rho
phase_g, N2kev, N6kev, N12kev, Nbol, Fbol, F2kev, F6kev, F12kev = read_JN_files(fname+'.csv')
# Number of phase samples per file; fixes the skymap row count below.
Nt = len(phase_g)

phase_t = np.linspace(0.0, 1.0, 200)
incls_t = np.linspace(0.0, 90.0, 100)
maxflux = 0.0

fig.text(0.3, 0.92, 'One spot', ha='center', va='center', size=10)
fig.text(0.7, 0.92, 'Two antipodal spots', ha='center', va='center', size=10)
# Main plotting loop: j indexes the spot colatitude (rows), k the spot
# configuration (0 = one spot, 1 = two antipodal spots).
for j in range(5):
    for k in range(2):
        #frame for the main pulse profile fig
        #ax1 = subplot(gs[mfiglim:mfiglim+panelh, k])
        if k == 0:
            ax1 = subplot(gs[mfiglim:mfiglim+panelh, 0:46])
        else:
            ax1 = subplot(gs[mfiglim:mfiglim+panelh, 49:95])
        ax1.minorticks_on()
        #ax1.set_xticklabels([])
        ax1.set_xlim(xmin, xmax)
        #ax1.set_ylabel('$N$ (2 keV)\n[ph cm$^{-2}$ s$^{-1}$ keV$^{-1}$]',size=lsize)
        #ax1.set_ylabel('$N$ (6 keV)',size=lsize)
        #ax1.set_ylabel('$N$ (12 keV)',size=lsize)
        #ax1.set_ylabel('Bolometric [ph cm$^{-2}$ s$^{-1}$]',size=lsize)
        # Only the bottom row gets an x label; only the left column a y label.
        if j == 4:
            ax1.set_xlabel('Phase', size=lsize)
        if k == 0:
            ax1.set_ylabel('Inclination $i$', size=lsize)
        else:
            ax1.set_yticklabels([])
        # Spot colatitude tag for this row (degrees, as used in file names).
        if j == 0:
            colat = '10'
        elif j == 1:
            colat = '30'
        elif j == 2:
            colat = '50'
        elif j == 3:
            colat = '70'
        elif j == 4:
            colat = '90'
        # skymap[phase, inclination] holds the bolometric flux.
        skymap = np.zeros((Nt, len(incls)))
        #skymap = np.zeros((len(incls), Nt))
        for q in range(len(incls)):
            incl = incls[q]
            #incl = incls[0]
            fname = path_files + nu+bprof+rad+mass+'d'+colat+incl+rho
            phase, N2kev, N6kev, N12kev, Nbol, Fbol, F2kev, F6kev, F12kev = read_JN_files(fname+'.csv')
            #add second spot
            if k == 1:
                # Fluxes of the antipodal spot are read from the *_2nd file
                # and simply added channel by channel.
                phase2, N2kev2, N6kev2, N12kev2, Nbol2, Fbol2, F2kev2, F6kev2, F12kev2 = read_JN_files(fname+'_2nd.csv')
                N2kev += N2kev2
                N6kev += N6kev2
                N12kev += N12kev2
                Nbol += Nbol2
                Fbol += Fbol2
                F2kev += F2kev2
                F6kev += F6kev2
                F12kev += F12kev2
            #build flux matrix
            flux = Fbol
            #flux = flux - flux.min()
            #flux = flux / flux.max()
            skymap[:,q] = flux
            #skymap[q,:] = flux
            #JN data
            #ax1.plot(phase, flux, 'k-')
        #fluxi2 = griddata(phase2, flux2, (phase), method='cubic')
        #skymap[:,0] = np.linspace(0.0, 1.0, Nt)
        #skymap[:,-1] = np.ones(Nt)
        print skymap.max()
        print shape(skymap)
        #print skymap
        #skymap_interp = griddata((phase_g, incls_g), skymap, (phase_t, incls_t), method='cubic')
        #skymap_interp = griddata((phase_g, incls_g), skymap, np.meshgrid(phase_t, incls_t), method='cubic')
        #print skymap_interp
        # Image extent: phase on x, inclination (degrees) on y.
        xr0 = phase_g[0]
        xr1 = phase_g[-1]
        yr0 = incls_g[0]
        yr1 = incls_g[-1]
        #print xr0, xr1, yr0, yr1
        extent = [xr0, xr1, yr0, yr1]
        # Smoothed copy used only for the contour; masked copy is unused in
        # the active imshow call below.
        skymap_smooth = ndimage.gaussian_filter(skymap, sigma=0.5, order=0)
        skymap_masked = np.ma.masked_where(skymap <= 0.001, skymap)
        #im = ax1.imshow(skymap_masked.T,
        im = ax1.imshow(skymap.T,
                        #interpolation='nearest',
                        interpolation='gaussian',
                        origin='lower',
                        extent=extent,
                        #cmap='Reds',
                        #cmap='jet',
                        #cmap='YlGnBu',
                        cmap='plasma_r',
                        vmin=0.0,
                        vmax=0.6,
                        #vmax=1.0,
                        aspect='auto')
        # Red contour at a near-zero flux level marks the visibility boundary.
        levels = [0.001]
        #levels = [0.05,0.95]
        cs1 = ax1.contour(skymap_smooth.T,
        #cs1 = ax1.contour(skymap.T,
                          levels,
                          colors = 'r',
                          origin='lower',
                          extent=extent)
        zc = cs1.collections[0]
        setp(zc, linewidth=1)
        # levels = [0.5]
        # cs1 = ax1.contour(skymap_smooth.T,
        # #cs1 = ax1.contour(skymap.T,
        # levels,
        # colors = 'b',
        # origin='lower',
        # extent=extent)
        # zc = cs1.collections[0]
        # setp(zc, linewidth=1)
        # One shared colorbar per row, drawn next to the right-hand panel.
        if k == 1:
            #mfiglim:mfiglim+panelh, 0:40])
            #cbaxes = fig.add_axes([0.90, (mfiglim+panelh)/500, 0.05, panelh/500.0])
            cbaxes = subplot(gs[mfiglim:mfiglim+panelh, 95:97])
            cb = colorbar(im,
                          #label='Probability density',
                          cax=cbaxes)
            cb.set_label('Bolometric flux\n[ph cm$^{-2}$ s$^{-1}$]',size=lsize)
    # Row label and advance to the next panel row.
    fig.text(0.5, 0.91-j*0.16, '$\\theta_{\mathrm{s}}$ = '+colat+'$^{\\circ}$', ha='center', va='center', size=tsize)
    mfiglim += panelh+skiph

savefig('fig7.pdf', bbox_inches='tight')
| natj/bender | paper/figs/fig7.py | Python | mit | 6,973 | [
"Gaussian"
] | 532b88b410875ef90a5a020f494f7fb779d67c7c74f1babebbcf0ab5a7ce7185 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import chain
from operator import attrgetter
from robot.utils import setter
from .itemlist import ItemList
from .message import Message, Messages
from .modelobject import ModelObject
class Keyword(ModelObject):
    """Base model for single keyword.

    A keyword owns child keywords and log messages; their original creation
    order is preserved through the ``_sort_key`` bookkeeping so that
    :attr:`children` can interleave the two lists correctly.
    """
    __slots__ = ['parent', 'name', 'doc', 'args', 'type', 'timeout',
                 '_sort_key', '_next_child_sort_key']
    KEYWORD_TYPE = 'kw'
    SETUP_TYPE = 'setup'
    TEARDOWN_TYPE = 'teardown'
    FOR_LOOP_TYPE = 'for'
    FOR_ITEM_TYPE = 'foritem'
    # Class used for child keywords; None means "same class as self".
    keyword_class = None
    # Class used for log messages attached to this keyword.
    message_class = Message

    def __init__(self, name='', doc='', args=(), type='kw', timeout=None):
        #: :class:`~.model.testsuite.TestSuite` or
        #: :class:`~.model.testcase.TestCase` or
        #: :class:`~.model.keyword.Keyword` that contains this keyword.
        self.parent = None
        #: Keyword name.
        self.name = name
        #: Keyword documentation.
        self.doc = doc
        #: Keyword arguments, a list of strings.
        self.args = args
        #: 'SETUP', 'TEARDOWN' or 'KW'.
        self.type = type
        #: Keyword timeout.
        self.timeout = timeout
        #: Keyword messages as :class:`~.model.message.Message` instances.
        self.messages = None
        #: Child keywords as :class:`~.model.keyword.Keyword` instances.
        self.keywords = None
        # Position among the parent's children; -1 until a parent is set.
        self._sort_key = -1
        # Counter handed out to children as their _sort_key.
        self._next_child_sort_key = 0

    # NOTE(review): `setter` (robot.utils) appears to intercept attribute
    # assignment so that e.g. `self.parent = x` stores this method's return
    # value -- confirm against robot.utils.
    @setter
    def parent(self, parent):
        # Record where this keyword sits among the new parent's children.
        if parent and parent is not self.parent:
            self._sort_key = getattr(parent, '_child_sort_key', -1)
        return parent

    @property
    def _child_sort_key(self):
        # Monotonically increasing counter used to restore creation order.
        self._next_child_sort_key += 1
        return self._next_child_sort_key

    @setter
    def keywords(self, keywords):
        # Wrap raw data in a Keywords container tied to this keyword.
        return Keywords(self.keyword_class or self.__class__, self, keywords)

    @setter
    def messages(self, messages):
        # Wrap raw data in a Messages container tied to this keyword.
        return Messages(self.message_class, self, messages)

    @property
    def children(self):
        """Child keywords and messages in creation order."""
        # It would be cleaner to store keywords/messages in same `children`
        # list and turn `keywords` and `messages` to properties that pick items
        # from it. That would require bigger changes to the model, though.
        return sorted(chain(self.keywords, self.messages),
                      key=attrgetter('_sort_key'))

    @property
    def id(self):
        # Hierarchical id, e.g. 's1-t1-k2': parent id plus this keyword's
        # 1-based position among the parent's keywords.
        if not self.parent:
            return 'k1'
        return '%s-k%d' % (self.parent.id, self.parent.keywords.index(self)+1)

    def visit(self, visitor):
        """Entry point of the visitor pattern for this model object."""
        visitor.visit_keyword(self)
class Keywords(ItemList):
    """Ordered list of :class:`Keyword` items with setup/teardown helpers."""
    __slots__ = []

    def __init__(self, keyword_class=Keyword, parent=None, keywords=None):
        ItemList.__init__(self, keyword_class, {'parent': parent}, keywords)

    @property
    def setup(self):
        """The leading setup keyword, or None if the list has no setup."""
        if self and self[0].type == 'setup':
            return self[0]
        return None

    @property
    def teardown(self):
        """The trailing teardown keyword, or None if there is none."""
        if self and self[-1].type == 'teardown':
            return self[-1]
        return None

    @property
    def all(self):
        """All keywords, including possible setup and teardown."""
        return self

    @property
    def normal(self):
        """Iterate over keywords that are neither setup nor teardown."""
        return (kw for kw in self if kw.type not in ('setup', 'teardown'))

    def __setitem__(self, index, item):
        # Preserve the replaced keyword's position in creation order.
        previous = self[index]
        ItemList.__setitem__(self, index, item)
        self[index]._sort_key = previous._sort_key
| yahman72/robotframework | src/robot/model/keyword.py | Python | apache-2.0 | 4,031 | [
"VisIt"
] | 2c2577dd9d22e536f9126fa14e8cdb6611a322baa438b534a3ad70abe5cc081f |
"""
Tests related to the cohorting feature.
"""
from uuid import uuid4
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.discussion import DiscussionTabSingleThreadPage, InlineDiscussionPage
from common.test.acceptance.tests.discussion.helpers import BaseDiscussionMixin, BaseDiscussionTestCase, CohortTestMixin
from common.test.acceptance.tests.helpers import UniqueCourseTest
class NonCohortedDiscussionTestMixin(BaseDiscussionMixin):
    """
    Mixin for tests of discussion in non-cohorted courses.
    """
    def setup_cohorts(self):
        """
        No cohorts are desired for this mixin.
        """
        pass

    def test_non_cohort_visibility_label(self):
        # Without cohorts, a post is always labeled as visible to everyone.
        self.setup_thread(1)
        label = self.thread_page.get_group_visibility_label()
        self.assertEquals(label, "This post is visible to everyone.")
class CohortedDiscussionTestMixin(BaseDiscussionMixin, CohortTestMixin):
    """
    Mixin for tests of discussion in cohorted courses.

    Relies on the host test class providing ``course_fixture``,
    ``setup_thread`` and ``refresh_thread_page``.
    """
    def setup_cohorts(self):
        """
        Sets up the course to use cohorting with a single defined cohort.
        """
        self.setup_cohort_config(self.course_fixture)
        self.cohort_1_name = "Cohort 1"
        self.cohort_1_id = self.add_manual_cohort(self.course_fixture, self.cohort_1_name)

    def test_cohort_visibility_label(self):
        # Must be moderator to view content in a cohort other than your own
        AutoAuthPage(self.browser, course_id=self.course_id, roles="Moderator").visit()
        # Create a thread restricted to cohort 1.
        self.thread_id = self.setup_thread(1, group_id=self.cohort_1_id)

        # Enable cohorts and verify that the post shows to cohort only.
        self.enable_cohorting(self.course_fixture)
        self.enable_always_divide_inline_discussions(self.course_fixture)
        self.refresh_thread_page(self.thread_id)
        self.assertEquals(
            self.thread_page.get_group_visibility_label(),
            "This post is visible only to {}.".format(self.cohort_1_name)
        )

        # Disable cohorts and verify that the post now shows as visible to everyone.
        self.disable_cohorting(self.course_fixture)
        self.refresh_thread_page(self.thread_id)
        self.assertEquals(self.thread_page.get_group_visibility_label(), "This post is visible to everyone.")
class DiscussionTabSingleThreadTest(BaseDiscussionTestCase):
    """
    Tests for the discussion page displaying a single thread.

    ``setup_cohorts`` is supplied by the cohorted/non-cohorted mixin that a
    concrete subclass combines with this class.
    """
    def setUp(self):
        super(DiscussionTabSingleThreadTest, self).setUp()
        self.setup_cohorts()
        # Create/authenticate a test user enrolled in the course.
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def setup_thread_page(self, thread_id):
        # Open the Discussion-tab view of the given thread.
        self.thread_page = DiscussionTabSingleThreadPage(self.browser, self.course_id, self.discussion_id, thread_id)  # pylint: disable=attribute-defined-outside-init
        self.thread_page.visit()

    # pylint: disable=unused-argument
    def refresh_thread_page(self, thread_id):
        # A plain reload suffices on the Discussion tab to re-show the thread.
        self.browser.refresh()
        self.thread_page.wait_for_page()
@attr(shard=5)
class CohortedDiscussionTabSingleThreadTest(DiscussionTabSingleThreadTest, CohortedDiscussionTestMixin):
    """
    Tests for the discussion page displaying a single cohorted thread.
    Combines the Discussion-tab fixture with the cohorted mixin.
    """
    # Actual test method(s) defined in CohortedDiscussionTestMixin.
    pass
@attr(shard=5)
class NonCohortedDiscussionTabSingleThreadTest(DiscussionTabSingleThreadTest, NonCohortedDiscussionTestMixin):
    """
    Tests for the discussion page displaying a single non-cohorted thread.
    Combines the Discussion-tab fixture with the non-cohorted mixin.
    """
    # Actual test method(s) defined in NonCohortedDiscussionTestMixin.
    pass
class InlineDiscussionTest(UniqueCourseTest):
    """
    Tests for inline discussions

    Builds a course with a single discussion XBlock inside one unit;
    ``setup_cohorts`` is supplied by the mixin a concrete subclass adds.
    """
    def setUp(self):
        super(InlineDiscussionTest, self).setUp()
        # Unique discussion id so runs do not collide.
        self.discussion_id = "test_discussion_{}".format(uuid4().hex)
        # chapter -> sequential -> vertical -> discussion XBlock.
        self.course_fixture = CourseFixture(**self.course_info).add_children(
            XBlockFixtureDesc("chapter", "Test Section").add_children(
                XBlockFixtureDesc("sequential", "Test Subsection").add_children(
                    XBlockFixtureDesc("vertical", "Test Unit").add_children(
                        XBlockFixtureDesc(
                            "discussion",
                            "Test Discussion",
                            metadata={"discussion_id": self.discussion_id}
                        )
                    )
                )
            )
        ).install()
        self.setup_cohorts()
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()

    def setup_thread_page(self, thread_id):
        # Inline discussions live in the courseware, not the Discussion tab.
        CoursewarePage(self.browser, self.course_id).visit()
        self.show_thread(thread_id)

    def show_thread(self, thread_id):
        # Expand the inline discussion and open the single expected thread.
        discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
        discussion_page.expand_discussion()
        self.assertEqual(discussion_page.get_num_displayed_threads(), 1)
        discussion_page.show_thread(thread_id)
        self.thread_page = discussion_page.thread_page  # pylint: disable=attribute-defined-outside-init

    def refresh_thread_page(self, thread_id):
        # After a reload the discussion must be expanded and re-opened.
        self.browser.refresh()
        self.show_thread(thread_id)
@attr(shard=5)
class CohortedInlineDiscussionTest(InlineDiscussionTest, CohortedDiscussionTestMixin):
    """
    Tests for cohorted inline discussions.
    Combines the inline-discussion fixture with the cohorted mixin.
    """
    # Actual test method(s) defined in CohortedDiscussionTestMixin.
    pass
@attr(shard=5)
class NonCohortedInlineDiscussionTest(InlineDiscussionTest, NonCohortedDiscussionTestMixin):
    """
    Tests for non-cohorted inline discussions.
    Combines the inline-discussion fixture with the non-cohorted mixin.
    """
    # Actual test method(s) defined in NonCohortedDiscussionTestMixin.
    pass
| angelapper/edx-platform | common/test/acceptance/tests/discussion/test_cohorts.py | Python | agpl-3.0 | 5,947 | [
"VisIt"
] | 1f63ff6670395a57addaa8b954e90befee5e14f9490bde0d0ea6fb0dcdf1a31d |
import h5py
import numpy as np
from phonopy.units import VaspToTHz
from phonopy.structure.cells import get_primitive
from upho.phonon.eigenstates import Eigenstates
class BandStructure:
    """Unfolded band structure evaluated along q-point paths.

    Results are written incrementally to ``band.hdf5``; path distances are
    accumulated so plots share a common x axis.
    """
    def __init__(self,
                 paths,
                 dynamical_matrix,
                 unitcell_ideal,
                 primitive_matrix_ideal,
                 is_eigenvectors=False,
                 is_band_connection=False,
                 group_velocity=None,
                 factor=VaspToTHz,
                 star="none",
                 mode="eigenvector",
                 verbose=False):
        """
        Args:
            dynamical_matrix:
                Dynamical matrix for the (disordered) supercell.
            primitive_ideal_wrt_unitcell:
                Primitive cell w.r.t. the unitcell (not the supercell).
        """
        # ._dynamical_matrix must be assigned for calculating DOS
        # using the tetrahedron method.
        self._dynamical_matrix = dynamical_matrix
        # self._cell is used for write_yaml and _shift_point.
        # This must correspond to the "ideal" primitive cell.
        primitive_ideal_wrt_unitcell = (
            get_primitive(unitcell_ideal, primitive_matrix_ideal))
        self._cell = primitive_ideal_wrt_unitcell

        # Frequency unit conversion factor (default: VASP units to THz).
        self._factor = factor
        self._is_eigenvectors = is_eigenvectors
        self._is_band_connection = is_band_connection
        # Band connection requires eigenvectors to trace bands across q-points.
        if is_band_connection:
            self._is_eigenvectors = True
        self._group_velocity = group_velocity

        # One array of q-points per path segment.
        self._paths = [np.array(path) for path in paths]
        self._distances = []
        # Running distance along the whole path and the positions of the
        # special (segment-boundary) points.
        self._distance = 0.
        self._special_point = [0.]
        self._eigenvalues = None
        self._eigenvectors = None
        self._frequencies = None

        self._star = star
        self._mode = mode

        self._eigenstates = Eigenstates(
            dynamical_matrix,
            unitcell_ideal,
            primitive_matrix_ideal,
            mode=mode,
            star=star,
            verbose=verbose)

        # All results stream into band.hdf5 while the paths are solved.
        with h5py.File('band.hdf5', 'w') as f:
            self._hdf5_file = f
            self._write_hdf5_header()
            self._set_band(verbose=verbose)

    def _write_hdf5_header(self):
        """Store the q-point paths at the top of the HDF5 file."""
        self._hdf5_file.create_dataset('paths', data=self._paths)

    def _set_initial_point(self, qpoint):
        """Reset the reference q-point at the start of a path segment."""
        self._lastq = qpoint.copy()

    def _shift_point(self, qpoint):
        # Accumulate the Cartesian reciprocal-space length of the step from
        # the previous q-point (fractional difference times inv(cell).T).
        self._distance += np.linalg.norm(
            np.dot(qpoint - self._lastq,
                   np.linalg.inv(self._cell.get_cell()).T))
        self._lastq = qpoint.copy()

    def _set_band(self, verbose=False):
        """Solve every path segment and record the special-point distances."""
        for ipath, path in enumerate(self._paths):
            self._set_initial_point(path[0])
            self._solve_dm_on_path(ipath, path, verbose)
            self._special_point.append(self._distance)

    def _solve_dm_on_path(self, ipath, path, verbose):
        """Extract and store eigenstates for each q-point on one segment."""
        eigenstates = self._eigenstates
        is_nac = self._dynamical_matrix.is_nac()
        for ip, q in enumerate(path):
            self._shift_point(q)

            # Non-analytical term correction is not supported here.
            if is_nac:
                raise ValueError('NAC is not implemented yet for unfolding')

            eigenstates.set_distance(self._distance)
            eigenstates.extract_eigenstates(q)
            # One HDF5 group per (path, point): "<ipath>/<ip>/".
            group = '{}/{}/'.format(ipath, ip)
            eigenstates.write_hdf5(self._hdf5_file, group=group)

    def get_unitcell_orig(self):
        """Return the primitive cell of the underlying dynamical matrix."""
        unitcell_orig = self._dynamical_matrix.get_primitive()
        return unitcell_orig

    def get_reduced_elements(self):
        """Return the unique chemical symbols in first-appearance order."""
        unitcell_orig = self.get_unitcell_orig()
        elements = unitcell_orig.get_chemical_symbols()
        reduced_elements = sorted(set(elements), key=elements.index)
        return reduced_elements
| yuzie007/ph_unfolder | upho/phonon/band_structure.py | Python | mit | 3,762 | [
"phonopy"
] | 6eb58046f2cf6d25b5932c7ad87dbac1fcf1913a74521354c05ff7659e6e2580 |
# -*- coding: utf-8 -*-
# @Author: YangZhou
# @Date: 2017-06-13 00:44:48
# @Last Modified by: YangZhou
# @Last Modified time: 2017-06-23 18:53:35
import aces.config as config
from ase import io
from aces.graph import plot, series
import numpy as np
from aces.runners.phonopy import runner as Runner
import pandas as pd
from aces.graph import fig, pl
from aces.tools import passthru, toString, cd,\
to_txt, shell_exec, mkdir, cp, ls
from aces.algorithm.kpoints import filter_along_direction
from aces.io.shengbte import get_w_final, get_qpoints, get_omega, get_tau, get_v
class runner(Runner):
    """Workflow driver for third-order force constants and ShengBTE thermal
    conductivity runs, plus post-processing/plotting of the results."""
    def fc3(self):
        """Shorthand for :meth:`force_constant3`."""
        self.force_constant3()
    def force_constant3(self):
        """Collect the VASP runs under dirs/dir_3RD.* and run 'thirdorder reap'
        to assemble FORCE_CONSTANTS_3RD."""
        cmd = 'find dirs/dir_3RD.* -name vasprun.xml |sort -n|' + \
            config.thirdorder + " reap" + self.getcut()
        passthru(cmd)
def getcut(self):
m = self.m
cut = str(m.shengcut / 10.0)
if m.shengcut < 0:
cut = str(m.shengcut)
return " %s %s " % (toString(m.supercell3), cut)
def generate_supercells3(self):
# generate supercells
cmd = config.thirdorder + "sow" + self.getcut()
print(cmd)
passthru(cmd)
    def getControl(self):
        """Write the ShengBTE CONTROL file for ../POSCAR.

        Emits the &allocations, &crystal, &parameters and &flags namelists
        using the settings held in self.m.
        """
        m = self.m
        f = open('CONTROL', 'w')
        atoms = io.read('../POSCAR')  # m.atoms
        elements = m.elements
        # shengbte needs nelements <=natoms
        if len(elements) > len(atoms):
            elements = elements[:len(atoms)]
        allocations = """&allocations
\tnelements=%d
\tnatoms=%d
\tngrid(:)=%s
&end
""" % (len(elements), len(atoms), toString(m.kpoints))
        cell = atoms.cell
        # ShengBTE types are 1-based indices into the elements list.
        types = toString(
            [m.elements.index(x) + 1 for x in atoms.get_chemical_symbols()])
        pos = ""
        for i, atom in enumerate(atoms):
            tu = (i + 1, toString(atoms.get_scaled_positions()[i]))
            pos += " positions(:,%d)=%s\n" % tu
        crystal = """&crystal
lfactor=0.1,
lattvec(:,1)=%s
lattvec(:,2)=%s
lattvec(:,3)=%s
elements=%s
types=%s
%s
scell(:)=%s
&end
""" % (toString(cell[0]), toString(cell[1]), toString(cell[2]),
       ' '.join(map(lambda x: '"' + x + '"', elements)), types, pos,
       m.dim)
        parameters = """&parameters
T=%f
scalebroad=1.0
&end
""" % (m.T)
        flags = """
&flags
nonanalytic=.TRUE.
nanowires=.FALSE.
&end
"""
        f.write(allocations)
        f.write(crystal)
        f.write(parameters)
        f.write(flags)
        f.close()
def sca(self, th=0.0):
qpoints_full = np.loadtxt('BTE.qpoints_full')
ks = qpoints_full[:, 2:4]
f = filter_along_direction(ks, th, eps=0.5)
ids = qpoints_full[:, 1].astype(np.int)[f]
qpoints = np.loadtxt('BTE.qpoints')
idx = qpoints[:, 0].astype(np.int)
u = [list(idx).index(i) for i in ids]
w = np.loadtxt('BTE.w_final')
omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
# w[omega<omega.flatten().max()*0.005]=float('nan')
tao = 1.0 / w + 1e-6
rt = tao[u, :3]
rom = omega[u, :3]
data = []
n, m = rom.shape
for i in range(m):
data.append([rom[:, i], rt[:, i], 'b'])
series(
xlabel='Frequency (THz)',
ylabel='Relaxation Time (ps)',
datas=data,
filename='scaling-%f.png' % th,
scatter=True,
legend=False,
logx=True,
logy=True)
def sca1(self):
qpoints_full = np.loadtxt('BTE.qpoints_full')
ks = qpoints_full[:, 2:4]
f = self.norm(ks, 2.3)
ids = qpoints_full[:, 1].astype(np.int)[f]
qpoints = np.loadtxt('BTE.qpoints')
idx = qpoints[:, 0].astype(np.int)
u = [list(idx).index(i) for i in ids]
w = np.loadtxt('BTE.w_final')
omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
w[omega < omega.flatten().max() * 0.005] = float('nan')
tao = 1.0 / w + 1e-6
rt = tao[u, :3]
rom = omega[u, :3]
data = []
n, m = rom.shape
for i in range(m):
data.append([rom[:, i], rt[:, i], 'b'])
series(
xlabel='Frequency (THz)',
ylabel='Relaxation Time (ps)',
datas=data,
filename='norm.png',
scatter=True,
legend=False,
logx=True,
logy=True)
def norm(self, ks, r):
filter = np.abs(np.linalg.norm(ks, axis=1) - r) < 1
return filter
def sca3(self):
qpoints_full = np.loadtxt('BTE.qpoints_full')
ks = qpoints_full[:, 2:4]
f = self.kx(ks, 2.3)
ids = qpoints_full[:, 1].astype(np.int)[f]
qpoints = np.loadtxt('BTE.qpoints')
idx = qpoints[:, 0].astype(np.int)
u = [list(idx).index(i) for i in ids]
w = np.loadtxt('BTE.w_final')
omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
w[omega < omega.flatten().max() * 0.005] = float('nan')
tao = 1.0 / w + 1e-6
rt = tao[u, :3]
rom = omega[u, :3]
data = []
n, m = rom.shape
for i in range(m):
data.append([rom[:, i], rt[:, i], 'b'])
series(
xlabel='Frequency (THz)',
ylabel='Relaxation Time (ps)',
datas=data,
filename='kx.png',
scatter=True,
legend=False,
logx=True,
logy=True)
def kx(self, ks, r):
filter = np.abs(ks[:, 0] - r) < 0.25
return filter
    def sca2(self):
        """Plot relaxation time vs. frequency, one series per q-point, for
        q-points 50-54; writes k.png."""
        w = np.loadtxt('BTE.w_final')
        omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
        # Mask near-zero (acoustic Gamma) modes so 1/w does not blow up.
        w[omega < omega.flatten().max() * 0.005] = float('nan')
        tao = 1.0 / w + 1e-6
        rt = tao[50:55, :]
        rom = omega[50:55, :]
        data = []
        n, m = rom.shape
        for i in range(n):
            data.append([rom[i, :], rt[i, :], 'b'])
        series(
            xlabel='Frequency (THz)',
            ylabel='Relaxation Time (ps)',
            datas=data,
            filename='k.png',
            scatter=True,
            legend=False,
            logx=True,
            logy=True)
def postT(self):
a = np.loadtxt("BTE.KappaTensorVsT_CONV")
with fig('T_kappa.png', legend=True):
ts = a[:, 0]
fil = ts <= 800
k1 = a[fil, 1]
k2 = a[fil, 5]
k3 = a[fil, 9]
ts = a[fil, 0]
pl.plot(ts, k1, lw=2, label="${\kappa_{xx}}$")
pl.plot(ts, k2, lw=2, label="${\kappa_{yy}}$")
pl.plot(ts, k3, lw=2, label="${\kappa_{zz}}$")
pl.xlabel("Tempeature (K)")
pl.ylabel('Thermal Conductivity (W/mK)')
    def grtao(self):
        """Plot Gruneisen coefficient vs. relaxation time (data under T300K/);
        writes gruneisen_tao.png."""
        cd('T300K')
        # Plot the relation between Gruneisen coefficient and relaxation time.
        w = np.loadtxt('BTE.w_final')[:, 1]
        w = np.abs(w)
        q = np.loadtxt(open('../BTE.qpoints'))
        n = len(q)
        # Reorder scattering rates into (q-point, branch) layout.
        w = w.T.reshape([-1, n])
        w = np.einsum('jk->kj', w)
        w.flags.writeable = True
        omega = np.loadtxt('../BTE.omega') / (2.0 * np.pi)
        # Mask near-zero (acoustic Gamma) modes so 1/w does not blow up.
        w[omega < omega.flatten().max() * 0.005] = float('nan')
        tao = 1.0 / w + 1e-6
        g = np.loadtxt('../BTE.gruneisen')
        with fig("gruneisen_tao.png"):
            pl.semilogy(
                g.flatten(),
                tao.flatten(),
                ls='.',
                marker='.',
                color='r',
                markersize=10)
            pl.ylabel('Relaxation Time (ps)')
            pl.xlabel('Gruneisen Coeffecient')
            pl.xlim([-10, 5])
            pl.ylim([0, 1e4])
    def post(self):
        """Post-process ShengBTE results under T300K/.

        Plots kappa convergence, cumulative kappa, per-band and spectral
        kappa, Gruneisen and P3 distributions, then delegates to the draw_*
        helpers. Each stage is best-effort: failures are printed and the
        remaining plots are still attempted.
        """
        cd('T300K')
        try:
            df = pd.read_csv(
                "BTE.kappa_scalar",
                sep=r"[ \t]+",
                header=None,
                names=['step', 'kappa'],
                engine='python')
            ks = np.array(df['kappa'])
            plot(
                (np.array(df['step']), 'Iteration Step'),
                (ks, 'Thermal Conductivity (W/mK)'),
                'kappa_scalar.png',
                grid=True,
                linewidth=2)
        except Exception as e:
            print(e)
        try:
            df = pd.read_csv(
                "BTE.cumulative_kappa_scalar",
                sep=r"[ \t]+",
                header=None,
                names=['l', 'kappa'],
                engine='python')
            ks = np.array(df['kappa'])
            plot(
                (np.array(df['l']),
                 'Cutoff Mean Free Path for Phonons (Angstrom)'),
                (ks, 'Thermal Conductivity (W/mK)'),
                'cumulative_kappa_scalar.png',
                grid=True,
                linewidth=2,
                logx=True)
        except Exception as e:
            print(e)
        try:
            omega = np.loadtxt('../BTE.omega') / (2.0 * np.pi)
            kappa = np.loadtxt('BTE.kappa')[-1, 1:]
            # Average the diagonal of each band's 3x3 tensor.
            kappa = np.einsum('jji', kappa.reshape([3, 3, -1])) / 3.0
            plot(
                (np.arange(len(omega[0])), 'Band'),
                (kappa, 'Thermal Conductivity (W/mK)'),
                'kappa_band.png',
                grid=True,
                linewidth=2)
            plot(
                (np.arange(len(omega[0])), 'Band'),
                (kappa.cumsum(), 'Thermal Conductivity (W/mK)'),
                'cumulative_kappa_band.png',
                grid=True,
                linewidth=2)
        except Exception as e:
            print(e)
        try:
            kappa = np.loadtxt('BTE.cumulative_kappaVsOmega_tensor')
            with fig("atc_freq.png"):
                # NOTE(review): all three labels read kappa_xx; the last two
                # presumably should be yy and zz — verify before relying on
                # the legend.
                pl.plot(kappa[:, 0], kappa[:, 1], label="${\kappa_{xx}}$")
                pl.plot(kappa[:, 0], kappa[:, 5], label="${\kappa_{xx}}$")
                pl.plot(kappa[:, 0], kappa[:, 9], label="${\kappa_{xx}}$")
                pl.xlabel("Frequency (THz)")
                pl.ylabel("Cumulative Thermal Conductivity(W/mK)")
            with fig("tc_freq.png"):
                pl.plot(
                    kappa[:, 0],
                    np.gradient(kappa[:, 1]),
                    label="${\kappa_{xx}}$")
                pl.plot(
                    kappa[:, 0],
                    np.gradient(kappa[:, 5]),
                    label="${\kappa_{xx}}$")
                pl.plot(
                    kappa[:, 0],
                    np.gradient(kappa[:, 9]),
                    label="${\kappa_{xx}}$")
                pl.xlabel("Frequency (THz)")
                pl.ylabel("Cumulative Thermal Conductivity(W/mK)")
        except Exception as e:
            print(e)
        try:
            g = np.loadtxt('../BTE.gruneisen')
            y = (g.flatten(), 'Gruneisen')
            plot(
                (omega.flatten(), 'Frequency (THz)'),
                y,
                'gruneisen_freq.png',
                grid=True,
                scatter=True)
            with fig('gruneisen_freq.png'):
                pl.scatter(
                    omega.flatten(), g.flatten(), marker='.', color='r', s=50)
                pl.xlabel('Frequency (THz)')
                pl.ylabel('Gruneisen Coeffecient')
                # pl.grid(True)
                pl.xlim([0, omega.max()])
                pl.ylim([-10, 5])
                # pl.tick_params(axis='both', which='major', labelsize=14)
            to_txt(['freq', 'gruneisen'],
                   np.c_[omega.flatten(), g.flatten()], 'gruneisen_freq.txt')
            g = np.loadtxt('../BTE.P3')
            with fig('p3_freq.png'):
                pl.scatter(
                    omega.flatten(),
                    g.flatten() * 1e6,
                    marker='.',
                    color='r',
                    s=50)
                pl.xlabel('Frequency (THz)')
                pl.ylabel('P3 $(\\times 10^{-6})$')
                # pl.grid(True)
                pl.xlim([0, omega.max()])
                pl.ylim([0, g.max() * 1e6])
            to_txt(['freq', 'p3'],
                   np.c_[omega.flatten(), g.flatten()], 'p3_freq.txt')
        except Exception as e:
            print(e)
        self.draw_gv()
        self.draw_branch_scatter()
        self.draw_tau()
        cd('..')
    def draw_gv(self):
        """Plot group velocity and mean free path against frequency.

        Writes v_freq.png/.txt and lamda_freq.png/.txt; errors are printed.
        """
        try:
            omega = get_omega('..')
            tau = get_tau('..')
            v = get_v('..')
            v = np.linalg.norm(v, axis=-1)
            y = (v.flatten(), 'Group Velocity (nm/ps)')
            plot(
                (omega.flatten(), 'Frequency (THz)'),
                y,
                'v_freq.png',
                grid=True,
                scatter=True)
            to_txt(['freq', 'vg'],
                   np.c_[omega.flatten(), v.flatten()], 'v_freq.txt')
            # Mean free path = |v| * tau.
            l = v * tau
            y = (l.flatten(), 'Mean Free Path (nm)')
            plot(
                (omega.flatten(), 'Frequency (THz)'),
                y,
                'lamda_freq.png',
                grid=True,
                scatter=True)
            to_txt(['freq', 'mfp'],
                   np.c_[omega.flatten(), l.flatten()], 'lamda_freq.txt')
        except Exception as e:
            print(e)
    def draw_branch_scatter(self):
        """Plot scatter rate vs. |q|, one series per branch; writes
        branchscatter.png.

        Best-effort: deliberately skipped in silence when the ShengBTE
        output files are absent.
        """
        try:
            w = get_w_final('..')
            q = get_qpoints('..')
            qnorm = np.linalg.norm(q, axis=1)
            data = []
            n, m = w.shape
            for i in range(m):
                data.append([qnorm, w[:, i], 'b'])
            series(
                xlabel='|q| (1/nm)',
                ylabel='Scatter Rate (THz)',
                datas=data,
                filename='branchscatter.png',
                scatter=True,
                legend=False,
                logx=True,
                logy=True)
        except Exception as e:
            pass
    def draw_tau(self):
        """Plot scatter rate and relaxation time vs. frequency and dump
        tao_freq.txt / q_tao.txt. Best-effort: errors are swallowed."""
        try:
            w = get_w_final('..')
            q = get_qpoints('..')
            omega = get_omega('..')
            tau = get_tau('..')
            plot(
                (omega.flatten(), 'Frequency (THz)'), (w.flatten(),
                                                       'Scatter Rate (THz)'),
                'scatter_freq.png',
                grid=True,
                scatter=True,
                logy=True)
            plot(
                (omega.flatten(), 'Frequency (THz)'), (tau.flatten(),
                                                       'Relaxation Time (ps)'),
                'tau_freq.png',
                grid=True,
                scatter=True,
                logy=True)
            to_txt(['freq', 'tau'],
                   np.c_[omega.flatten(), tau.flatten()], 'tao_freq.txt')
            # One row per (q-point, branch): q1 q2 q3 frequency tau.
            r = []
            for i, qq in enumerate(q):
                c = tau[i]
                d = omega[i]
                for j, cc in enumerate(c):
                    r.append([qq[0], qq[1], qq[2], d[j], c[j]])
            to_txt(['q1', 'q2', 'q3', 'f(THz)', 'tao(ps)'], r, 'q_tao.txt')
        except Exception as e:
            pass
def vtao(self):
# group velocity vs. tao using old version of shengbte
w = np.loadtxt('BTE.w_final')
w = np.abs(w)
omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
w[omega < omega.flatten().max() * 0.005] = float('nan')
tao = 1.0 / w + 1e-6
v = np.loadtxt(open('BTE.v'))
n, m = v.shape
v = v.reshape([n, 3, m / 3])
v = np.linalg.norm(v, axis=1)
l = v * tao
l[l < 1e-6] = None
with fig('tao_v.png'):
pl.semilogy(
v.flatten(),
tao.flatten(),
linestyle='.',
marker='.',
color='r',
markersize=5)
pl.xlabel('Group Velocity (nm/ps)')
pl.ylabel('Relaxation Time (ps)')
pl.grid(True)
with fig('tao_l.png'):
pl.loglog(
l.flatten(),
tao.flatten(),
linestyle='.',
marker='.',
color='r',
markersize=5)
pl.xlabel('Mean Free Path (nm)')
pl.ylabel('Relaxation Time (ps)')
pl.grid(True)
with fig('v_l.png'):
pl.semilogy(
v.flatten(),
l.flatten(),
linestyle='.',
marker='.',
color='r',
markersize=5)
pl.xlabel('Group Velocity (nm/ps)')
pl.ylabel('Mean Free Path (nm)')
pl.grid(True)
    def getGrid(self):
        """Parse the 'ngrid(:)=nx ny nz' line of CONTROL and return (nx, ny, nz)."""
        s = shell_exec("grep ngrid CONTROL")
        from scanf import sscanf
        grids = sscanf(s, "ngrid(:)=%d %d %d")
        return grids
def getQ(self):
# atoms = io.read('../POSCAR')
# rcell = atoms.get_reciprocal_cell()
grid = self.getGrid()
q0 = []
for ii in range(grid[0]):
for jj in range(grid[1]):
for kk in range(grid[2]):
k = [
float(ii) / grid[0] - .5,
float(jj) / grid[1] - .5,
float(kk) / grid[2] - .5
]
# q0.append(np.einsum('ij,i',rcell,k))
q0.append(k)
return np.array(q0)
def getQFmap(self):
qpoints_full = np.loadtxt('BTE.qpoints_full')
qpoints = np.loadtxt('BTE.qpoints')
ids = qpoints_full[:, 1].astype(np.int)
idx = qpoints[:, 0].astype(np.int)
a = {}
for i, id in enumerate(idx):
a[id] = i
u = np.array([a[i] for i in ids])
return u
    def taoth(self):
        """Average relaxation time vs. propagation direction in the xy plane
        (old ShengBTE output layout); writes tao_th.png."""
        # tao vs. direction in xy plane using old version of shengbte
        w = np.loadtxt('BTE.w_final')
        w = np.abs(w)
        omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
        w[omega < omega.flatten().max() * 0.005] = float('nan')
        tao = 1.0 / w + 1e-6
        tao[tao > 10000] = 0
        tao = np.nan_to_num(tao)
        u = self.getQFmap()
        tao = tao[u]
        # To keep the q-points inside the Brillouin zone we must rebuild them
        # ourselves instead of reading BTE.qpoints_full.
        # qpoints_full=np.loadtxt('BTE.qpoints_full')
        # q=qpoints_full[:,-3:]
        q = self.getQ()
        with fig('tao_th.png'):
            # ax = pl.subplot(111, projection='polar')
            N = 100
            th = np.linspace(0, 1, N) * np.pi * 2.0 - np.pi
            r = np.zeros_like(th)
            r1 = np.zeros_like(th)
            theta = np.arctan2(q[:, 1], q[:, 0])
            for i in np.arange(1):
                # Average tau over each angular bin [th[j], th[j+1]).
                for j, tt in enumerate(th):
                    if j == len(th) - 1:
                        fil = (theta >= tt)
                    else:
                        fil = (theta >= tt) * (theta < th[j + 1])
                    r[j] = np.nan_to_num(tao[fil].mean())
                    r1[j] = np.nan_to_num(fil.sum())
                # c = pl.plot(th, r, lw=2)
                # pl.plot(th, r1,lw=2)
                # c.set_alpha(0.75)
                # pl.semilogy(q[:,0].flatten(),tao[:,i].flatten()
                # ,linestyle='.',marker='.',color='r',markersize =5)
            pl.grid(True)
def postold(self):
try:
df = pd.read_csv(
"BTE.kappa_scalar",
sep=r"[ \t]+",
header=None,
names=['step', 'kappa'],
engine='python')
ks = np.array(df['kappa'])
plot(
(np.array(df['step']), 'Iteration Step'),
(ks, 'Thermal Conductivity (W/mK)'),
'kappa_scalar.png',
grid=True,
linewidth=2)
except Exception as e:
print(e)
try:
df = pd.read_csv(
"BTE.cumulative_kappa_scalar",
sep=r"[ \t]+",
header=None,
names=['l', 'kappa'],
engine='python')
ks = np.array(df['kappa'])
plot(
(np.array(df['l']),
'Cutoff Mean Free Path for Phonons (Angstrom)'),
(ks, 'Thermal Conductivity (W/mK)'),
'cumulative_kappa_scalar.png',
grid=True,
linewidth=2,
logx=True)
except Exception as e:
print(e)
try:
omega = np.loadtxt('BTE.omega') / (2.0 * np.pi)
kappa = np.loadtxt('BTE.kappa')[-1, 1:]
kappa = np.einsum('jji', kappa.reshape([3, 3, -1])) / 3.0
plot(
(np.arange(len(omega[0])), 'Band'),
(kappa, 'Thermal Conductivity (W/mK)'),
'kappa_band.png',
grid=True,
linewidth=2)
plot(
(np.arange(len(omega[0])), 'Band'),
(kappa.cumsum(), 'Thermal Conductivity (W/mK)'),
'cumulative_kappa_band.png',
grid=True,
linewidth=2)
except Exception as e:
print(e)
try:
w = np.loadtxt('BTE.w_final')
w = np.abs(w)
w[omega < omega.flatten().max() * 0.005] = float('nan')
plot(
(omega.flatten(), 'Frequency (THz)'), (w.flatten(),
'Scatter Rate (THz)'),
'scatter_freq.png',
grid=True,
scatter=True,
logy=True)
tao = 1.0 / w + 1e-6
with fig('tao_freq.png'):
pl.semilogy(
omega.flatten(),
tao.flatten(),
linestyle='.',
marker='.',
color='r',
markersize=5)
pl.xlabel('Frequency (THz)')
pl.ylabel('Relaxation Time (ps)')
pl.grid(True)
pl.xlim([0, omega.max()])
# pl.ylim([0,tao.flatten().max()])
to_txt(['freq', 'tao'],
np.c_[omega.flatten(), tao.flatten()], 'tao_freq.txt')
except Exception as e:
print(e)
"""
if not exists('relaxtime'):mkdir('relaxtime')
cd('relaxtime')
for i,om in enumerate(omega[:6]):
print "q : ",i
plot((om,'Frequency (THz)'),(tao[i],'Relaxation Time (ps)'),
'tao_freq_q%d.png'%i,grid=True,scatter=True,logx=True,logy=True)
cd('..')
"""
try:
v = np.loadtxt(open('BTE.v'))
n, m = v.shape
v = v.reshape([n, 3, m / 3])
v = np.linalg.norm(v, axis=1)
y = (v.flatten(), 'Group Velocity (nm/ps)')
plot(
(omega.flatten(), 'Frequency (THz)'),
y,
'v_freq.png',
grid=True,
scatter=True)
to_txt(['freq', 'vg'],
np.c_[omega.flatten(), v.flatten()], 'v_freq.txt')
except Exception as e:
print(e)
try:
l = v * tao
l[l < 1e-6] = None
plot(
(omega.flatten(), 'Frequency (THz)'), (l.flatten(),
'Mean Free Path (nm)'),
'lamda_freq.png',
grid=True,
scatter=True,
logy=True,
logx=True,
xmin=0)
to_txt(['freq', 'mfp'],
np.c_[omega.flatten(), l.flatten()], 'lamda_freq.txt')
except Exception as e:
print(e)
try:
q = np.loadtxt(open('BTE.qpoints'))
qnorm = np.linalg.norm(q[:, -3:], axis=1)
data = []
n, m = w.shape
for i in range(m):
data.append([qnorm, w[:, i], 'b'])
series(
xlabel='|q| (1/nm)',
ylabel='Scatter Rate (THz)',
datas=data,
filename='branchscatter.png',
scatter=True,
legend=False,
logx=True,
logy=True)
except Exception as e:
print(e)
    def third(self):
        """Prepare the thirdorder/ working directory and sow the displaced
        supercells for third-order force constants."""
        mkdir('thirdorder')
        cd('thirdorder')
        cp('../POSCAR', '.')
        self.generate_supercells3()
    def vasprun3(self):
        """Run VASP on every 3RD.* displaced supercell (numeric order)."""
        files = shell_exec("ls 3RD.*.*|sort -n").split('\n')
        assert len(files) > 0
        self.getvasprun(files)
    def pSecond(self):
        """Generate the second-order displaced supercells and run VASP on them."""
        cp('../POSCAR', '.')
        self.generate_supercells()
        files = shell_exec("ls *-*").split('\n')
        assert len(files) > 0
        self.getvasprun(files)
    def generate(self):
        """Full workflow: relax the structure, compute second- and third-order
        force constants, then launch the ShengBTE run."""
        # m = self.m
        self.minimizePOSCAR()
        # cp('minimize/POSCAR','.')
        mkdir('secondorder')
        cd('secondorder')
        self.pSecond()
        self.fc2()
        cd('..')
        self.third()
        self.vasprun3()
        self.force_constant3()
        cd('..')
        self.pSheng()
        self.runsheng()
    def pSheng(self):
        """Assemble the SHENG/ run directory: both force-constant files plus
        a freshly written CONTROL."""
        mkdir('SHENG')
        cd('SHENG')
        cp('../secondorder/FORCE_CONSTANTS', 'FORCE_CONSTANTS_2ND')
        cp('../thirdorder/FORCE_CONSTANTS_3RD', '.')
        self.getControl()
    def runold(self):
        # Thermal conductivity calculation with the legacy ShengBTE binary
        # (config.sheng), run under MPI on all nodes*procs ranks.
        m = self.m
        print("START SHENGBTE...")
        passthru(config.mpirun + " %s " % (m.nodes * m.procs) + config.sheng)
    def runsheng(self):
        # Thermal conductivity calculation with ShengBTE (config.shengbte),
        # run under MPI on all nodes*procs ranks.
        m = self.m
        print("START SHENGBTE...")
        passthru(config.mpirun + " %s " % (m.nodes * m.procs) +
                 config.shengbte)
    def kmfp(self):
        """Compare cumulative kappa vs. mean-free-path cutoff across the
        shengold* runs and fit 1/kappa against 1/L.

        Writes reduce_mfp.png and kappa_inv_mpf_inv.png.
        """
        def ff(p, x):
            # Saturating model for kappa(L); alternatives kept for reference.
            # return p[0]*(1.0-np.exp(-x**p[2]/p[1]))
            return 1.0 / (p[1] / x + 1 / p[0]) - p[2]
            # return p[0]*p[1]**x
        def fit(x, z, p0, tt):
            # Least-squares fit of model tt(p, x) to data z, starting at p0.
            def errorfunc(p, x, z):
                return tt(p, x) - z
            from scipy.optimize import leastsq
            solp, ier = leastsq(
                errorfunc,
                p0,
                args=(x, z),
                Dfun=None,
                full_output=False,
                ftol=1e-9,
                xtol=1e-9,
                maxfev=100000,
                epsfcn=1e-10,
                factor=0.1)
            return solp
        dirs = ls('shengold*')
        from aces.scanf import sscanf
        from aces.graph import fig, pl
        us = []
        for d in dirs:
            f = shell_exec('grep ngrid %s/CONTROL' % d)
            ks = sscanf(f, " ngrid(:)=%d %d %d")
            # Only compare runs whose Ny grid dimension is 4.
            if (ks[1] != 4):
                continue
            f = np.loadtxt('%s/BTE.cumulative_kappa_scalar' % d)
            us.append([ks, f])
        with fig('reduce_mfp.png', legend=True, ncol=1):
            for i, u in enumerate(us):
                if i < 3:
                    continue
                ks, f = u
                x, y = f[:, 0], f[:, 1]
                pl.semilogx(x, y, label="Nx= %d " % ks[0], linewidth=2)
            ks, f = us[-1]
            x, y = f[:, 0], f[:, 1]
            # fil=(x>0)
            # p=fit(x[fil],y[fil],[1,1,1],ff)
            # y1=ff(p,x)
            # pl.semilogx(x,y1,label="fit of Nx= %d "%ks[0],linewidth=2)
            pl.xlabel('Cutoff Mean Free Path for Phonons (Angstrom)')
            pl.ylabel('Thermal Conductivity (W/mK)')
            pl.grid(True)
        with fig('kappa_inv_mpf_inv.png', legend=True, ncol=1):
            ks, f = us[-1]
            # NOTE(review): 'x' here is the value left over from the previous
            # with-block; verify that filtering f by it is intended.
            fil = x > .5
            x, y = f[fil, 0], f[fil, 1]
            xx = 1 / x
            yy = 1 / y
            pl.plot(xx, yy, linewidth=3, c='red', label="Nx=1024")
            def ll(p, x):
                # Straight line for the 1/kappa vs 1/L extrapolation.
                return p[0] * x + p[1]
            fil = xx > xx.max() / 4
            p = fit(xx[fil], yy[fil], [1, 1, 1], ll)
            pl.plot(xx, ll(p, xx), lw=3, ls='dashed', label="Fitted")
            pl.xlabel('1/L (1/Angstrom)')
            pl.ylabel('$1/\\kappa_L$ (mK/W)')
            pl.grid(True)
| vanceeasleaf/aces | aces/runners/shengbte.py | Python | gpl-2.0 | 28,228 | [
"ASE",
"CRYSTAL",
"phonopy"
] | 576f29e6e34aa6b1efc5ee4d7049071342b26dd08df0e0ffd16d5728ff4787a9 |
"""
Neon Space
By: Dalton Fox, Nathaniel Craiglow, Brandon Mitman and Mason Lee
ETGG1802:52
"""
from particles import *
from functions import *
import time
import sys, os.path
import math
import pygame
import random
if sys.platform in ["win32", "win64"]:
os.environ["SDL_VIDEO_CENTERED"] = "1"
filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets')
soundpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sounds')
class Player(object):
    """The player's ship: motion state, resources (hp/energy/insanity),
    firing, and HUD rendering."""
    def __init__(self, x, y, player_index, sprite, angle=0, heading=0, max_speed=2.6, speed=0, thrusting=False):
        self.isVisible = True
        self.isInvincible = False
        self.score = 0
        self.lives = 3
        self.x = x
        self.y = y
        # angle: the direction the ship points; heading: the direction it moves.
        self.angle = angle
        self.heading = heading
        self.speed = speed
        self.max_speed = max_speed
        self.thrusting = thrusting
        self.thruster_locked = False
        self.thrust_force_dx = 0
        self.thrust_force_dy = 0
        # Earliest time the ship may fire again; rof is the cooldown length.
        self.fire_delay = 0
        self.rof = 0.3
        self.hp = 5
        self.energy = 100
        # Spotlight state: toggled on/off, with its current radius in pixels.
        self.light = False
        self.light_time = 0
        self.light_size = 200
        self.flicker_time = 0
        self.isInvincible_time = 0
        self.insanity = 0
        self.sprite = sprite
        self.zoom = 0.35
        self.player_index = player_index
        # Colour assets indexed by color_index (module-level lists).
        self.color_index = 0
        self.color = color_list[self.color_index]
        self.glow = pygame.image.load(os.path.join(filepath, glow_list[self.color_index])).convert_alpha()
        self.glow_ring = pygame.image.load(os.path.join(filepath, glow_ring_list[self.color_index])).convert_alpha()
        self.adjusted_color = adjustcolor(self.color)
        self.mass = 2
    def fire(self):
        """Spawn a projectile at the ship's position; costs energy and starts
        the rate-of-fire cooldown."""
        sound_shoot.play()
        self.fire_delay = time.time() + self.rof
        self.energy -= fire_cost
        return Projectile(self.color, 7, self.x, self.y, self.angle, sprite_projectile, self.zoom)
    def render(self, surface):
        """Draw the ship (plus glow ring when the light is on) in world
        coordinates, then the HUD bars in fixed screen coordinates."""
        # Draw Ship Stuff
        rotated_sprite = pygame.transform.rotozoom(self.sprite, self.angle, self.zoom).convert_alpha()
        surface.blit(rotated_sprite, ((self.x-rotated_sprite.get_width()/2) - world.screenleft, (self.y-rotated_sprite.get_height()/2) - world.screentop))
        if self.light:
            surface.blit(self.glow_ring, (self.x-world.screenleft-self.glow_ring.get_width()/2, self.y-world.screentop-self.glow_ring.get_height()/2))
        # Draw Hud Stuff
        for i in range(0, 5): # Red squares for health
            pygame.draw.rect(screen, (95, 10, 10), [i * 30 + 325, 705, 20, 20], 0)
        for i in range(0, self.hp): # Red squares for health
            pygame.draw.rect(screen, (235, 30, 30), [i * 30 + 325, 705, 20, 20], 0)
        percent = self.energy / 100 # Energy bar
        pygame.draw.rect(screen, (30, 30, 235), [740, 705, -percent * 250, 20], 0)
        pygame.draw.rect(screen, (255, 255, 255), [740, 705, -250, 20], 2)
        pygame.draw.line(screen, (235, 235, 235), (700, 705), (700, 725), 2)
        insanity_percent = self.insanity / 100 # insanity bar
        pygame.draw.rect(screen, (0, 255, 0), [270, 705, -insanity_percent * 125, 20], 0)
        pygame.draw.rect(screen, (255, 255, 255), [270, 705, -125, 20], 2)
    def update(self):
        """Per-frame simulation step: clamp resources, apply thrust/drag,
        bounce off world borders and asteroids, then handle death.

        NOTE(review): reads/writes the module globals insanity_rate,
        player_index and respawn_time rather than per-instance state —
        presumably single-player only; verify before adding more players.
        """
        # Score
        if self.score < 0:
            self.score = 0
        # I-frames
        if self.isInvincible and time.time() > self.isInvincible_time:
            self.isInvincible = False
        if self.isInvincible:
            self.hp = 5
        # Full insanity costs one hp and resets the meter.
        if self.insanity >= 100:
            self.hp -= 1
            self.insanity = 0
        if self.insanity <= 0:
            self.insanity = 0
        if self.light == False:
            global insanity_rate
            self.insanity += insanity_rate
        else:
            self.insanity -= .25
        if self.energy < 0:
            self.energy = 0
        if self.energy > 100:
            self.energy = 100
        # Low energy cripples the ship: faster fire cooldown penalty, lower
        # top speed, faster insanity gain, and the light shuts off.
        if self.energy <= ship_shutdown:
            self.rof = 0.15
            self.max_speed = 1.6
            insanity_rate = 0.06
            self.light = False
        else:
            insanity_rate = 0.03
            self.rof = 0.3
            self.max_speed = 2.6
        if not self.light:
            self.light_size = 300
        if self.light:
            self.energy -= spot_cost
            self.light_size = 700
        # Hovering with the light on drains extra energy.
        if self.light == True and self.speed < 1:
            self.energy -= spot_cost*3
        if self.thrusting:
            thrust_force = 0.02
            if self.thruster_locked:
                self.energy -= move_cost/2
            else:
                self.energy -= move_cost
        else:
            thrust_force = 0
            self.energy += energy_recharge
            if not self.thruster_locked:
                if self.speed > 0:
                    self.speed -= 0.0058
                else:
                    self.speed = 0
        dx = self.speed*math.cos(math.radians(self.heading))
        dy = -1*self.speed*math.sin(math.radians(self.heading))
        if not self.thrusting and not self.thruster_locked:
            self.thrust_force_dx = 0
            self.thrust_force_dy = 0
        if self.thrusting and not self.thruster_locked:
            self.thrust_force_dx = thrust_force*math.cos(math.radians(self.angle))
            self.thrust_force_dy = thrust_force*-1*math.sin(math.radians(self.angle))
        if self.thruster_locked:
            self.thrust_force_dx = self.thrust_force_dx
            self.thrust_force_dy = self.thrust_force_dy
            if self.thrusting and self.thruster_locked:
                self.energy -= move_cost/2
            else:
                self.energy -= move_cost*10
        resultant_dx = dx+self.thrust_force_dx
        resultant_dy = dy+self.thrust_force_dy
        # Bounce off the world borders.
        if self.x > world.width:
            self. x = world.width
            resultant_dx *= -1
        if self.x < 0:
            self.x = 0
            resultant_dx *= -1
        if self.y > world.height:
            self.y = world.height
            resultant_dy *= -1
        if self.y < 0:
            self.y = 0
            resultant_dy *= -1
        # Asteroid collisions: heavier rocks bounce harder and hurt more.
        for asteroid in asteroid_list:
            if collidingCircles(self.x, self.y, 30 / 1.5, asteroid.x, asteroid.y, asteroid.mass / 1.2):
                if asteroid.mass > 100:
                    resultant_dx *= -2
                    resultant_dy *= -2
                    self.hp -= 5
                    players[player_index].score += hit_by_asteroid
                    sound_hit.play()
                elif 100 > asteroid.mass > 50:
                    resultant_dx *= -2
                    resultant_dy *= -2
                    self.hp -= 3
                    self.energy -= 20
                    players[player_index].score += hit_by_asteroid
                    sound_hit.play()
                else:
                    resultant_dx *= -1
                    resultant_dy *= -1
                    self.hp -= 1
                    self.energy -= 10
                    players[player_index].score += hit_by_asteroid
                    sound_hit.play()
        if self.speed < self.max_speed:
            self.speed = (resultant_dx**2 + resultant_dy**2)**0.5
        else:
            self.speed = self.max_speed
        self.heading = math.degrees(math.atan2(-resultant_dy, resultant_dx))
        self.x += resultant_dx
        self.y += resultant_dy
        if self.hp <= 0:
            global respawn_time
            if self.lives > 1:
                respawn_time = time.time() + 3
                self.light = True
            kill()
class Projectile(object):
    """A bullet fired by a player, tinted to the firing player's colour."""
    def __init__(self, color, speed, start_x, start_y, start_heading, sprite, zoom, decay=0.1):
        self.x = start_x
        self.y = start_y
        self.heading = start_heading
        self.speed = speed
        # Remaining lifetime; decreases by 'decay' every update.
        self.life_left = 100
        self.decay = decay
        self.sprite = sprite
        self.zoom = zoom
        # Recolour the white template sprite to the owning player's colour.
        self.PixelArray = pygame.PixelArray(self.sprite)
        self.PixelArray.replace((255, 255, 255), color)
    def render(self, screen):
        # Only draw when the projectile is inside the visible screen window.
        if self.x > world.screenleft and self.x < world.screenleft + screen_width and self.y > world.screentop and self.y < world.screentop + screen_height:
            new_sprite = pygame.transform.rotozoom(self.sprite, self.heading, self.zoom)
            screen.blit(new_sprite, ((self.x-new_sprite.get_width()/2)-world.screenleft, (self.y-new_sprite.get_width()/2)-world.screentop))
    def update(self):
        """Advance along the heading; remove self from the global
        'projectiles' list once the lifetime runs out."""
        dx = self.speed * math.cos(math.radians(self.heading))
        dy = -1 * self.speed * math.sin(math.radians(self.heading))
        self.x += dx
        self.y += dy
        self.life_left -= self.decay
        if self.life_left <= 0:
            projectiles.remove(self)
class World(object):
    """The playing field: world dimensions, the scrolling screen window, and
    the static objects placed in it."""
    def __init__(self, width, height, screenleft=0, screentop=0, screenwidth=1024, screenheight=768):
        self.height = height
        self.width = width
        # Top-left corner of the visible screen window, in world coordinates.
        self.screenleft = screenleft
        self.screentop = screentop
        self.screenwidth = screenwidth
        self.screenheight = screenheight
        self.object_list = []
    def add_object(self, objectleft, objecttop, objectsurface, objectcollides=True):
        """Place a new WorldObject at the given world coordinates."""
        self.object_list.append(WorldObject(objectleft, objecttop, objectsurface, objectcollides))
    def render_objects(self, surface):
        """Draw the three grid densities, all world objects, and the border."""
        # draw grid-lines
        for c3 in range(0, world.width, int(world.width/80)):
            pygame.draw.line(surface, (35, 35, 40), (c3 - self.screenleft, 0 - self.screentop), (c3 - self.screenleft, self.height - self.screentop), 1)
        for r3 in range(0, world.height, int(world.height/80)):
            pygame.draw.line(surface, (35, 35, 40), (0 - self.screenleft, r3 - self.screentop), (self.width - self.screenleft, r3 - self.screentop), 1)
        for c2 in range(0, world.width, int(world.width/20)):
            pygame.draw.line(surface, (60, 60, 65), (c2 - self.screenleft, 0 - self.screentop), (c2 - self.screenleft, self.height - self.screentop), 1)
        for r2 in range(0, world.height, int(world.height/20)):
            pygame.draw.line(surface, (60, 60, 65), (0 - self.screenleft, r2 - self.screentop), (self.width - self.screenleft, r2 - self.screentop), 1)
        for c1 in range(0, world.width, int(world.width/4)):
            pygame.draw.line(surface, (120, 120, 130), (c1 - self.screenleft, 0 - self.screentop), (c1 - self.screenleft, self.height - self.screentop), 1)
        for r1 in range(0, world.height, int(world.height/4)):
            pygame.draw.line(surface, (120, 120, 130), (0 - self.screenleft, r1 - self.screentop), (self.width - self.screenleft, r1 - self.screentop), 1)
        for object in self.object_list:
            surface.blit(object.surface, (object.left-self.screenleft, object.top-self.screentop))
        # draw a rectangle around the world perimeter
        pygame.draw.rect(surface, (145, 50, 50), (int(0-self.screenleft), int(0-self.screentop), self.width, self.height), 3)
    def collide_objects(self, collideleft, collidetop, collidewidth, collideheight):
        """Return True when the given rectangle overlaps any collidable object."""
        rectangle = pygame.Rect(collideleft, collidetop, collidewidth, collideheight)
        for object in self.object_list:
            if object.collides == True:
                if rectangle.colliderect((object.left, object.top, object.width, object.height)) == True:
                    return True
        return False
class WorldObject(object):
    """A static object placed in the world, backed by a drawing surface."""

    def __init__(self, left, top, surface, collides=True):
        """Record position, the surface, its dimensions and its centre point."""
        self.left = left
        self.top = top
        self.surface = surface
        self.width = surface.get_width()
        self.height = surface.get_height()
        self.collides = collides
        # Centre of the sprite, in surface-local coordinates.
        self.x = self.surface.get_width() / 2
        self.y = self.surface.get_height() / 2
class Radar(pygame.sprite.Sprite):
    """Minimap sprite in the bottom-right corner showing the whole world
    scaled down: asteroids, planets, players and the visible-area box.

    NOTE(review): relies on the class attribute 'groups' being assigned
    (pygame sprite-group idiom) before instantiation — set elsewhere.
    """
    def __init__(self):
        pygame.sprite.Sprite.__init__(self, self.groups)
        self.width = 175
        self.height = 175
        self.image = pygame.Surface((self.width, self.height))
        self.drawmap()
        self.rect = self.image.get_rect()
        self.rect.topleft = (screen_width - self.width - screen_buffer, screen_height - self.height - screen_buffer)
        # World-to-minimap scale factors.
        self.factor_x = self.width * 1.0 / world.width
        self.factor_y = self.height * 1.0 / world.height
    def drawmap(self):
        # Clear to black and draw the translucent frame.
        self.image.fill((0, 0, 0))
        pygame.draw.rect(self.image, (250, 250, 250), (0, 0, self.width, self.height), 2)
        self.image.set_alpha(175)
    def update(self):
        """Redraw the minimap: grid, camera box, asteroids within the light
        range, planets/beacons/sun, and player dots."""
        self.drawmap()
        # Grid-lines
        for cl in range(0, world.width, int(world.width/4)):
            pygame.draw.line(self.image, (100, 100, 100), (round(cl * self.factor_x, 0), 0), (round(cl * self.factor_x), round(world.height * self.factor_y)), 1)
        for rl in range(0, world.height, int(world.height/4)):
            pygame.draw.line(self.image, (100, 100, 100), (0, round(rl * self.factor_y, 0)), (round(world.width * self.factor_x), round(rl * self.factor_y)), 1)
        # Player rect
        pygame.draw.rect(self.image, (200, 200, 200), (round((world.screenleft - players[player_index].light_size) * self.factor_x, 0),
                                                       round((world.screentop - players[player_index].light_size) * self.factor_y, 0),
                                                       round((screen_width + players[player_index].light_size * 2) * self.factor_x, 0),
                                                       round((screen_height + players[player_index].light_size * 2) * self.factor_y, 0)), 1)
        # asteroids
        for asteroid in asteroid_list:
            if asteroid.x > round(world.screenleft - players[player_index].light_size):
                if round(asteroid.x < world.screenleft + screen_width + players[player_index].light_size):
                    if asteroid.y > round(world.screentop - players[player_index].light_size):
                        if asteroid.y < round(world.screentop + screen_width + players[player_index].light_size):
                            # if the asteroid is within our view range draw it
                            if asteroid.mass > 200 and asteroid.mass < 300:
                                pygame.draw.circle(self.image, (200, 155, 155), (int(asteroid.x * self.factor_x),
                                                                                 int(asteroid.y * self.factor_y)), 4)
                            if asteroid.mass > 100 and asteroid.mass < 200:
                                pygame.draw.circle(self.image, (200, 155, 155), (int(asteroid.x * self.factor_x),
                                                                                 int(asteroid.y * self.factor_y)), 3)
                            else:
                                pygame.draw.circle(self.image, (200, 155, 155), (int(asteroid.x * self.factor_x),
                                                                                 int(asteroid.y * self.factor_y)), 3)
        # Planets
        for object in world.object_list:
            if object.surface == sun:
                pygame.draw.circle(self.image, (245, 255, 55), (int((object.left + object.surface.get_width() / 2) * self.factor_x),
                                                                int((object.top + object.surface.get_height() / 2) * self.factor_y)), 13)
            elif object.surface == beacon:
                pygame.draw.circle(self.image, (45, 155, 255), (int((object.left + object.surface.get_width() / 2) * self.factor_x),
                                                                int((object.top + object.surface.get_height() / 2) * self.factor_y)), 6)
            else:
                pygame.draw.circle(self.image, (55, 255, 55), (int((object.left + object.surface.get_width() / 2) * self.factor_x),
                                                               int((object.top + object.surface.get_height() / 2) * self.factor_y)), 10)
        # Player dot
        for player in players:
            if player.x > round(world.screenleft - players[player_index].light_size):
                if player.x < round(world.screenleft + screen_width + players[player_index].light_size):
                    if player.y > round(world.screentop - players[player_index].light_size):
                        if player.y < round(world.screentop + screen_height + players[player_index].light_size):
                            # if the player is within our view range draw it
                            pygame.draw.circle(self.image, (player.adjusted_color), (int(player.x * self.factor_x),
                                                                                     int(player.y * self.factor_y)), 2)
        # Black holes
        #for blackhole in blackhole_list:
            #pygame.draw.circle(self.image, (0, 155, 155), (int(blackhole.x * self.factor_x),
                                                           #int(blackhole.y * self.factor_y)), 2)
class Star(object):
    """One background star for the parallax star field.

    Coordinates are in world space.  ``stretched_x``/``stretched_y`` trail
    behind the real position so the star can be drawn as a short streak
    while the player is moving.
    """

    def __init__(self, chose):
        # Spawn somewhere inside the currently visible screen rectangle.
        left, top = world.screenleft, world.screentop
        self.x = random.randint(int(left), int(left + screen_width))
        self.y = random.randint(int(top), int(top + screen_height))
        # The streak endpoint starts collapsed onto the star itself.
        self.stretched_x = self.x
        self.stretched_y = self.y
        # Brightness selector chosen by the caller (1 = bright, 2 = dim).
        self.chose = chose

    def update(self, player):
        """Wrap the streak endpoint around the screen edges, then drift it
        opposite to *player*'s motion."""
        if self.stretched_x < world.screenleft:
            self.stretched_x = world.screenleft + screen_width
        elif self.stretched_x > world.screenleft + screen_width:
            self.stretched_x = world.screenleft
        if self.stretched_y < world.screentop:
            self.stretched_y = world.screentop + screen_height
        elif self.stretched_y > world.screentop + screen_height:
            self.stretched_y = world.screentop
        # The star snaps to where the streak endpoint was last frame.
        self.x = self.stretched_x
        self.y = self.stretched_y
        heading_rad = math.radians(player.heading)
        self.stretched_x -= player.speed * 0.5 * math.cos(heading_rad)
        self.stretched_y += player.speed * 0.5 * math.sin(heading_rad)
class StarField(object):
    """A randomly sized collection of background Star objects."""

    def __init__(self):
        # 30-70 stars, each randomly bright (1) or dim (2).
        num_stars = random.randint(30, 70)
        self.star_list = [Star(random.randint(1, 2)) for _ in range(num_stars)]

    def drawStars(self, player):
        """Draw every on-screen star as a streak, then advance all stars."""
        left = world.screenleft
        top = world.screentop
        for star in self.star_list:
            visible = (left < star.x < left + screen_width
                       and top < star.y < top + screen_height)
            if visible:
                start = (star.x - left, star.y - top)
                end = (star.stretched_x - left, star.stretched_y - top)
                if star.chose == 1:
                    pygame.draw.line(screen, (210, 210, 255), start, end, 2)
                if star.chose == 2:
                    pygame.draw.line(screen, (130, 130, 180), start, end, 1)
            star.update(player)
class Asteroid(object):
    """A destructible, drifting polygonal obstacle.

    Asteroids bounce off the world borders, spin slowly, lose mass when
    shot, and are removed from ``asteroid_list`` once their mass drops to
    15 or below.
    """

    def __init__(self, position, mass):
        minmass = 30
        maxmass = 200
        self.mass = clamp(mass, minmass, maxmass)
        # Lighter asteroids move faster: massfactor shrinks as mass grows.
        # NOTE(review): under Python 2 this expression would use integer
        # division -- confirm the game targets Python 3.
        massfactor = ((1 - ((mass - minmass) / (maxmass - minmass))) * 5) + 0.015
        self.x = position[0]
        self.y = position[1]
        self.dx = (clamp(mass, minmass, maxmass) * massfactor) * random.uniform(-1, 1)
        self.dy = (clamp(mass, minmass, maxmass) * massfactor) * random.uniform(-1, 1)
        self.angle = random.uniform(0, 360)
        self.spin = random.uniform(-1, 1)
        self.health = 10
        # Build a rough unit polygon with 5-12 evenly spaced vertices.
        points = random.randint(5, 12)
        self._points = []
        for i in range(points):
            angle = (i * 2 * math.pi) / points
            point = []
            point.append(math.cos(angle))
            point.append(math.sin(angle))
            self._points.append(point)
        self.get_points()
        self.occluder = Occluder(self.real_points)

    def rotate(self, point, angle_rad):
        """Rotate *point* ([x, y]) by *angle_rad* radians about the origin
        and return the new [x, y] pair.  Pure function; ignores self."""
        return [math.cos(angle_rad) * point[0] - math.sin(angle_rad) * point[1],
                math.sin(angle_rad) * point[0] + math.cos(angle_rad) * point[1]]

    def hit(self):
        """Register one projectile hit: play a sound, shed health and mass."""
        sound_hit.play()
        self.health -= 1
        self.mass -= 10

    def get_points(self):
        """Recompute ``self.real_points``: the unit polygon scaled by mass,
        rotated by the current angle and translated to screen space."""
        self.real_points = []
        angle_rad = math.radians(self.angle)
        for x, y in self._points:
            rotated = self.rotate([self.mass * x, self.mass * y], angle_rad)
            self.real_points.append([(rotated[0] + self.x) - world.screenleft,
                                     (rotated[1] + self.y) - world.screentop])

    def update(self, dt):
        """Advance the asteroid by *dt* seconds: resolve projectile hits,
        integrate motion, bounce off world edges and refresh geometry."""
        # Hurt asteroids if they get shot.
        # BUG FIX: iterate over a snapshot -- the original removed bullets
        # from `projectiles` while iterating it, which skips elements.
        for bullet in list(projectiles):
            if collidingCircles(self.x, self.y, self.mass / 1.5, bullet.x, bullet.y, 10):
                self.hit()
                projectiles.remove(bullet)
                players[player_index].score += hit
                if self.mass <= 15:
                    # BUG FIX: stop once destroyed.  The original could call
                    # asteroid_list.remove(self) twice (ValueError) when
                    # several bullets struck in the same frame.
                    asteroid_list.remove(self)
                    players[player_index].score += destroy
                    return
        self.x += self.dx * dt
        self.y += self.dy * dt
        # Bounce off the world borders.
        if self.x < 0:
            self.x = 0
            self.dx *= -1
        elif self.x > world.width:
            self.x = world.width
            self.dx *= -1
        if self.y < 0:
            self.y = 0
            self.dy *= -1
        elif self.y > world.height:
            self.y = world.height
            self.dy *= -1
        self.angle = (self.angle + self.spin) % 360
        self.get_points()
        self.occluder.set_points(self.real_points)
        self.occluder.set_bounce(0.1)

    def render(self, surface):
        """Draw the asteroid outline onto *surface*."""
        pygame.draw.aalines(surface, (255, 255, 255), True, self.real_points)
class blackhole(object):
    """A gravity well placed at a random spot away from the world edges.

    Lowercase class name kept for compatibility with existing callers.
    """

    def __init__(self):
        # Keep holes at least 1000 world units from every world border.
        self.x = random.randint(1000, world.width - 1000)
        self.y = random.randint(1000, world.height - 1000)
        self.mass = 1000
        self.size = 150

    def draw(self, surface):
        """Render as a filled black disc with a faint grey rim."""
        center = (int(self.x - world.screenleft), int(self.y - world.screentop))
        pygame.draw.circle(surface, (0, 0, 0), center, self.size, 0)
        pygame.draw.circle(surface, (20, 20, 20), center, self.size, 2)
def kill():
    """Handle the local player's death.

    Busy-waits until ``respawn_time`` has passed, drawing a countdown in
    the top-left corner each iteration, then respawns the player at a
    random safe location and resets camera, star field and score.
    """
    # NOTE(review): respawn_time is read here but never advanced inside
    # this function -- confirm it is updated elsewhere before kill() runs.
    dead = True
    sound_explode.play()
    while dead:
        if time.time() > respawn_time:
            # Respawn: random position, stationary, stats refilled.
            players[player_index].x = random.randint(1000, 9000)
            players[player_index].y = random.randint(1000, 9000)
            players[player_index].speed = 0
            players[player_index].energy = 100
            players[player_index].hp = 5
            players[player_index].lives -= 1
            fix(players[player_index])
            # Re-centre the camera on the new position.
            (world.screenleft, world.screentop) = (players[player_index].x-world.screenwidth/2, players[player_index].y-world.screenheight/2)
            global star_field
            star_field = StarField()
            dead = False
            # `death` is the (negative) score penalty defined below.
            players[player_index].score += death
            if players[player_index].lives > 0:
                # Brief invincibility window after respawning.
                players[player_index].isInvincible = True
                players[player_index].isInvincible_time = time.time() + 3
                players[player_index].light = False
        else:
            # Still waiting: draw the respawn countdown banner.
            screen.fill((0, 0, 0), (0, 0, 250, 25))
            respawn_font = font.render('Respawn time: ' + str(round(respawn_time-time.time(), 1)), 1, (200, 200, 0))
            screen.blit(respawn_font, (0, 0))
            pygame.display.update()
def teleport(item):
    """Punish the player for touching *item* (a black hole): warp the ship
    to a random safe spot, drain it, and relocate the hazard."""
    global star_field
    sound_teleport.play()
    me = players[player_index]
    me.x = random.randint(1000, 9000)
    me.y = random.randint(1000, 9000)
    me.speed = 0
    me.energy = 0
    me.hp -= 2
    me.score += warped
    # Move the hazard somewhere else so it cannot re-trigger immediately.
    item.x = random.randint(0, world.width)
    item.y = random.randint(0, world.height)
    fix(me)
    # Re-centre the camera on the relocated ship and rebuild the stars.
    (world.screenleft, world.screentop) = (me.x - world.screenwidth / 2,
                                           me.y - world.screenheight / 2)
    star_field = StarField()
def fix(object):
    """Relocate *object* to a random position until it is clear of every
    dangerous obstacle.

    The parameter name `object` (shadowing the builtin) is kept for
    interface compatibility.
    """
    for obstacle in danger_obstacles:
        # Required clearance depends on the obstacle type.
        # BUG FIX: the original compared instances to classes with `==`
        # (e.g. `obstacle == Asteroid`), which is always False, so the
        # clearance distance was always 0.  Use isinstance() instead.
        if isinstance(obstacle, Asteroid):
            distance = obstacle.mass
        elif isinstance(obstacle, WorldObject):
            (x, y) = obstacle.surface.get_size()
            distance = (x + y) / 2 + 500
        elif isinstance(obstacle, blackhole):
            distance = obstacle.mass
        else:
            distance = 0
        # NOTE(review): the relocation loop is applied per obstacle type
        # using that type's clearance -- confirm against the original
        # intent (indentation was ambiguous in the source).
        while avoid_obstacles(object, distance, danger_obstacles):
            object.x = random.randint(1000, 9000)
            object.y = random.randint(1000, 9000)
# Game set-up -----------------------------------------------------------
# NOTE: everything from here down runs at module level and fills in the
# globals (screen, fonts, sounds, assets) used by the classes above.
pygame.mixer.pre_init(44100, 16, 2, 4096)  # frequency, size, channels, buffersize
pygame.init()
screen_width = 1024
screen_height = 768
flags = pygame.SWSURFACE | pygame.DOUBLEBUF  # double buffering increases FPS
screen = pygame.display.set_mode((screen_width, screen_height), flags, 32)
# Extra margin (pixels) the camera may scroll past the world edge.
screen_buffer = 32
fps = 120  # Pushed up to 300 FPS with the improvements
clock = pygame.time.Clock()
# Fonts and pre-rendered HUD/menu labels.
font = pygame.font.SysFont('Arial', 26)
font_small = pygame.font.SysFont('Arial', 16)
menu_font = pygame.font.SysFont('Arial', 80)
large_menu_font = pygame.font.SysFont('Arial', 240)
hp_font = font.render('HP', 1, (235, 30, 30))
low_hp_font = font_small.render('Hull Breach!', 1, (200, 95, 0))
energy_font = font.render('Energy', 1, (30, 30, 235))
low_energy_font = font_small.render('Ship Impaired!', 1, (200, 200, 0))
insanity_font = font.render('Insanity', 1, (0, 255, 0))
high_insanity_font = font_small.render('Crew Panicked!', 1, (95, 200, 0))
blackhole_font = font_small.render('Gravity Influx Detected!', 1, (95, 20, 95))
start_font = menu_font.render('Start', 1, (0, 0, 255))
exit_font = menu_font.render('Quit', 1, (0, 0, 255))
ship_font = menu_font.render('Pick a Ship!', 1, (0, 0, 255))
color_font = menu_font.render('Pick a Color!', 1, (0, 0, 255))
left_arrow = large_menu_font.render('<', 1, (0, 0, 255))
right_arrow = large_menu_font.render('>', 1, (0, 0, 255))
respawn_time = 0
respawn_font = font_small.render(str(respawn_time), 1, (200, 200, 0))
# State flags driving the top-level menu/game loops below.
menu = True
ship_menu = False
color_menu = False
credits = False
game = False
panty = False  # easter-egg ship toggle (see the ship-select loop)
ship_index = 1
warning_time = time.time() + 1
music = True
pygame.mixer.music.set_volume(0.35)
# Assets
# NOTE(review): `filepath` and `soundpath` are presumably defined earlier
# in the file -- not visible in this chunk.
panty_ship = pygame.image.load(os.path.join(filepath, "panty_ship.png"))
sprite_projectile = pygame.image.load(os.path.join(filepath, "projectile.png")).convert_alpha()
shroud = pygame.image.load(os.path.join(filepath, "shroud.png")).convert_alpha()
planet_1 = pygame.image.load(os.path.join(filepath, "planet_1.png")).convert_alpha()
planet_2 = pygame.image.load(os.path.join(filepath, "planet_2.png")).convert_alpha()
sun = pygame.image.load(os.path.join(filepath, "sun.png")).convert_alpha()
beacon = pygame.image.load(os.path.join(filepath, "Beacon.png")).convert_alpha()
menu_screen = pygame.image.load(os.path.join(filepath, "menu_screen.png")).convert_alpha()
title = pygame.image.load(os.path.join(filepath, "menu_title.png")).convert_alpha()
small_shroud = pygame.image.load(os.path.join(filepath, "shroud_small.png")).convert_alpha()
credits_pic = pygame.image.load(os.path.join(filepath, "credits.png")).convert_alpha()
sound_click = pygame.mixer.Sound(os.path.join(soundpath, "click.ogg"))
sound_warning = pygame.mixer.Sound(os.path.join(soundpath, "energy_low.ogg"))
sound_explode = pygame.mixer.Sound(os.path.join(soundpath, "explosion.ogg"))
sound_hit = pygame.mixer.Sound(os.path.join(soundpath, "hit.ogg"))
sound_teleport = pygame.mixer.Sound(os.path.join(soundpath, "teleport.ogg"))
sound_shoot = pygame.mixer.Sound(os.path.join(soundpath, "shoot.ogg"))
# Twelve selectable player colours; glow_list/glow_ring_list hold the
# matching sprite file names in the same order.
color_list = [(0, 75, 227), (0, 227, 227), (0, 227, 0), (114, 227, 0),  # blue, blue-green, green, green-yellow
              (227, 227, 0), (227, 169, 0), (227, 114, 0), (227, 107, 0),  # yellow, yellow-orange, orange, orange-red
              (227, 0, 0), (227, 0, 227), (114, 0, 227), (125, 0, 227)]  # red, red-violet, violet, blue-violet
glow_list = ["glow_blue.png", "glow_blue-green.png", "glow_green.png", "glow_green-yellow.png",
             "glow_yellow.png", "glow_yellow-orange.png", "glow_orange.png", "glow_orange-red.png",
             "glow_red.png", "glow_red-violet.png", "glow_violet.png", "glow_blue-violet.png"]
glow_ring_list = ["glow_ring_blue.png", "glow_ring_blue-green.png", "glow_ring_green.png", "glow_ring_green-yellow.png",
                  "glow_ring_yellow.png", "glow_ring_yellow-orange.png", "glow_ring_orange.png", "glow_ring_orange-red.png",
                  "glow_ring_red.png", "glow_ring_red-violet.png", "glow_ring_violet.png", "glow_ring_blue-violet.png"]
# Object Initialization
player_index = 0
players = []
players.append(Player(random.randint(1000, 9000), random.randint(1000, 9000), player_index, panty_ship))
projectiles = []
# 10000x10000 world with the camera sized to the physical screen.
world = World(10000, 10000, 0, 0, screen_width, screen_height)
world.screenleft = players[player_index].x-world.screenwidth/2
world.screentop = players[player_index].y-world.screenheight/2
star_field = StarField()
# Gravitational constant used by the black-hole pull in the game loop.
G = 0.005
# Static scenery: two planets, a sun and two beacons.
world.add_object(6900, 2000, planet_2, True)
world.add_object(2000, 6900, planet_1, True)
world.add_object(3900, 3900, sun, True)
world.add_object(1895, 1895, beacon, True)
world.add_object(6895, 6895, beacon, True)
asteroid_list = []
num_asteroids = 70
for i in range(num_asteroids):
    asteroid_list.append(Asteroid([random.randint(1000, 9000), random.randint(1000, 9000)], random.randint(50, 300)))
blackhole_list = []
for i in range(9):
    blackhole_list.append(blackhole())
# Draw radar / hud
radar = pygame.sprite.LayeredUpdates()
Radar.groups = radar
Radar()
# Ship variables
energy_recharge = 0.16
fire_cost = 1.25
spot_cost = 0.025
move_cost = 0.015
ship_shutdown = 15  # below this energy level the ship starts to fail
light_reload = 0.5
flicker_reload = 0.05
crew_panicked = 75  # insanity threshold for the HUD warning
insanity_rate = 0.045
# Score variables (deltas applied to the player's score)
death = -15
warped = -3
hit_by_asteroid = -1
hit = 2
destroy = 10
# Everything the spawn-relocation helper fix() must keep clear of.
# NOTE: the loop variable `object` shadows the builtin of the same name.
danger_obstacles = []
for object in asteroid_list:
    danger_obstacles.append(object)
for object in world.object_list:
    danger_obstacles.append(object)
for object in blackhole_list:
    danger_obstacles.append(object)
fix(players[player_index])
#particles
fire_colors = [(200,200,255),(150,150,200),(50,50,150),(25,25,75),(5,5,25),(0,0,0)]
emitter_stream = Emitter()
emitter_stream.set_density(0)
emitter_stream.set_speed([25.0,150.0])
emitter_stream.set_life([1.0,3.0])
emitter_stream.set_colors(fire_colors)
particle_system = ParticleSystem()
particle_system.add_emitter(emitter_stream, "stream")
#####~~~~~~#####
##### Menu #####
#####~~~~~~#####
# Play Music
if music:
    pygame.mixer.music.load(os.path.join(soundpath, "mm.ogg"))
    pygame.mixer.music.play(-1)
# Main menu: polls the mouse against hard-coded button rectangles.
while menu:
    pygame.event.pump()
    mx, my = pygame.mouse.get_pos()
    mouse_left, mouse_middle, mouse_right = pygame.mouse.get_pressed()
    # Start
    if mx > 750 and mx < 925 and my > 405 and my < 485 and mouse_left:
        print('Pick a Ship!')
        sound_click.play()
        ship_menu = True
        menu = False
        time.sleep(0.25)  # crude click debounce
    # Quit: (inside box and clicked) OR escape -- `and` binds before `or`.
    if mx > 610 and mx < 765 and my > 615 and my < 695 and mouse_left or KeyIsPressed('escape'):
        print(':(')
        sound_click.play()
        menu = False
        time.sleep(0.25)
    # Render
    screen.fill((0, 0, 0))
    screen.blit(menu_screen, (0, 0))
    new_title = pygame.transform.rotozoom(title, 0, 0.7)  # Title
    screen.blit(new_title, (-20, 10))
    # Menu buttons and fonts
    screen.blit(start_font, (768, 400))  # Start
    pygame.draw.rect(screen, (200, 200, 200), (750, 405, 175, 80), 1)
    screen.blit(exit_font, (625, 610))  # Quit
    pygame.draw.rect(screen, (200, 200, 200), (610, 615, 155, 80), 1)
    pygame.display.update()
#####~~~~~~~~~~~~~~~~~~#####
##### Ship select loop #####
#####~~~~~~~~~~~~~~~~~~#####
while ship_menu:
    pygame.event.pump()
    mx, my = pygame.mouse.get_pos()
    mouse_left, mouse_middle, mouse_right = pygame.mouse.get_pressed()
    # Quit: (inside box and clicked) OR escape -- `and` binds before `or`.
    if mx > 610 and mx < 765 and my > 615 and my < 695 and mouse_left or KeyIsPressed('escape'):
        sound_click.play()
        print(':(')
        ship_menu = False
        time.sleep(0.25)
    # PANTY SHIP (easter egg: toggle the special ship with 'p')
    if KeyIsPressed('p'):
        if panty:
            panty = False
        else:
            panty = True
        time.sleep(0.25)
    # Cycle ships (left / right arrow boxes)
    if mx > screen_width/2-425 and mx < screen_width/2-425+150 and my > screen_height/2-100 and my < screen_height/2-100+150 and mouse_left:
        sound_click.play()
        ship_index -= 1
        time.sleep(0.25)
    if mx > screen_width/2+275 and mx < screen_width/2+275+150 and my > screen_height/2-100 and my < screen_height/2-100+150 and mouse_left:
        sound_click.play()
        ship_index += 1
        time.sleep(0.25)
    # Wrap the index around the 14 available ship sprites.
    if ship_index < 1:
        ship_index = 14
    if ship_index > 14:
        ship_index = 1
    if panty:
        players[player_index].sprite = panty_ship
    else:
        ship_select = "ship_" + str(ship_index) + ".png"
        ship = pygame.image.load(os.path.join(filepath, ship_select))
        players[player_index].sprite = ship
    # Start
    if mx > 412 and mx < 587 and my > 680 and my < 760:
        if mouse_left:
            sound_click.play()
            print('Pick a Color!')
            color_menu = True
            ship_menu = False
            time.sleep(0.25)
    # Render
    screen.fill((0, 0, 0))
    # Arrow keys
    pygame.draw.rect(screen, (255, 255, 255), (screen_width/2-425, screen_height/2-100, 150, 150), 3)  # Left
    screen.blit(left_arrow, (screen_width/2-395, screen_height/2-165))
    pygame.draw.rect(screen, (255, 255, 255), (screen_width/2+275, screen_height/2-100, 150, 150), 3)  # Right
    screen.blit(right_arrow, (screen_width/2+285, screen_height/2-165))
    # Middle box for ships
    pygame.draw.rect(screen, (255, 255, 255), (screen_width/2-225, screen_height/2-225, 450, 450), 3)
    # Ships
    if panty:
        new_ship = pygame.transform.scale(panty_ship, (400, 400))
        screen.blit(new_ship, (screen_width/2-200, screen_height/2-200))
    else:
        new_ship = pygame.transform.scale(ship, (400, 400))
        screen.blit(new_ship, (screen_width/2-200, screen_height/2-200))
    # Menu buttons and fonts
    screen.blit(ship_font, (screen_width/2-180, 10))  # Ship font
    screen.blit(start_font, (screen_width/2-80, 675))  # Start
    pygame.draw.rect(screen, (200, 200, 200), (412, 680, 175, 80), 1)
    pygame.display.update()
#####~~~~~~~~~~~~#####
##### Color Loop #####
#####~~~~~~~~~~~~#####
while color_menu:
    pygame.event.pump()
    mx, my = pygame.mouse.get_pos()
    mouse_left, mouse_middle, mouse_right = pygame.mouse.get_pressed()
    # Quit: (inside box and clicked) OR escape -- `and` binds before `or`.
    if mx > 610 and mx < 765 and my > 615 and my < 695 and mouse_left or KeyIsPressed('escape'):
        sound_click.play()
        print(':(')
        color_menu = False
        time.sleep(0.25)
    # Cycle colors (left / right arrow boxes)
    if mx > screen_width/2-425 and mx < screen_width/2-425+150 and my > screen_height/2-100 and my < screen_height/2-100+150 and mouse_left:
        sound_click.play()
        players[player_index].color_index -= 1
        time.sleep(0.25)
    if mx > screen_width/2+275 and mx < screen_width/2+275+150 and my > screen_height/2-100 and my < screen_height/2-100+150 and mouse_left:
        sound_click.play()
        players[player_index].color_index += 1
        time.sleep(0.25)
    # Wrap the index around the 12-entry colour table.
    if players[player_index].color_index < 0:
        players[player_index].color_index = 11
    if players[player_index].color_index > 11:
        players[player_index].color_index = 0
    # Update colors (reloads the glow sprites every frame)
    players[player_index].color = color_list[players[player_index].color_index]
    players[player_index].glow = pygame.image.load(os.path.join(filepath, glow_list[players[player_index].color_index])).convert_alpha()
    players[player_index].glow_ring = pygame.image.load(os.path.join(filepath, glow_ring_list[players[player_index].color_index])).convert_alpha()
    players[player_index].adjusted_color = adjustcolor(players[player_index].color)
    # Start
    if mx > 412 and mx < 587 and my > 680 and my < 760 and mouse_left:
        sound_click.play()
        print('Start!')
        color_menu = False
        game = True
        time.sleep(0.25)
    # Render
    screen.fill((0, 0, 0))
    #Arrow Keys
    pygame.draw.rect(screen, (255, 255, 255), (screen_width/2-425, screen_height/2-100, 150, 150), 3)  # Left
    screen.blit(left_arrow, (screen_width/2-395, screen_height/2-165))
    pygame.draw.rect(screen, (255, 255, 255), (screen_width/2+275, screen_height/2-100, 150, 150), 3)  # Right
    screen.blit(right_arrow, (screen_width/2+285, screen_height/2-165))
    # Color
    new_color = pygame.transform.scale(players[player_index].glow, (400, 400))
    screen.blit(new_color, (screen_width/2-200, screen_height/2-200))
    # Menu buttons and fonts
    screen.blit(color_font, (screen_width/2-180, 10))  # Ship font
    screen.blit(start_font, (screen_width/2-80, 675))  # Start
    pygame.draw.rect(screen, (200, 200, 200), (412, 680, 175, 80), 1)
    pygame.display.update()
# Transition out of the menus into the game proper.
# (`== True` is redundant but kept as written.)
if game == True:
    pygame.display.update()
    pygame.mixer.music.fadeout(1500)
    time.sleep(0.25)  # load things
#####~~~~~~~~~~~#####
##### Game Loop #####
#####~~~~~~~~~~~#####
# Play Music
if music:
    pygame.mixer.music.load(os.path.join(soundpath, "bgm.ogg"))
    pygame.mixer.music.play(-1)
# Grace period so the player cannot die right after spawning.
players[player_index].isInvincible = True
players[player_index].isInvincible_time = time.time() + 5
while game:
    ### print("fps: ", clock.get_fps())
    # Fixed timestep derived from the target frame rate (clock.tick(fps)
    # at the bottom of the loop keeps real time roughly in step).
    dt = (1 / fps)
    (mouse_left, mouse_middle, mouse_right) = pygame.mouse.get_pressed()
    # Escape sequence: quit to credits on Escape or when out of lives.
    if KeyIsPressed('escape') or players[player_index].lives <= 0:
        game = False
        time.sleep(0.45)
        credits = True
    # Controls
    # Go!
    if KeyIsPressed('w'):
        players[player_index].thrusting = True
        emitter_stream.set_density(300)
        emitter_stream.set_angle(-players[player_index].angle + 180, 45.0)
    else:
        players[player_index].thrusting = False
        emitter_stream.set_density(0)
    # Rotation (turning bleeds off a little speed)
    if KeyIsPressed('a'):
        players[player_index].angle += 2
        if players[player_index].speed >= 1:
            players[player_index].speed -= 0.004
    if KeyIsPressed('d'):
        players[player_index].angle -= 2
        if players[player_index].speed >= 1:
            players[player_index].speed -= 0.004
    # Brake
    if KeyIsPressed('s'):
        if players[player_index].speed > 0:
            players[player_index].speed -= 0.006
        else:
            players[player_index].speed = 0
    # spot light: 'e' toggles, rate-limited by light_reload
    if KeyIsPressed('e') and time.time() > players[player_index].light_time:
        players[player_index].light_time = time.time() + light_reload
        if not players[player_index].light:
            players[player_index].light = True
        else:
            players[player_index].light = False
        sound_click.play()
    # Fire
    if KeyIsPressed('space') and time.time() > players[player_index].fire_delay:
        projectiles.append(players[player_index].fire())
    # Lock thruster
    if KeyIsPressed('left shift'):
        players[player_index].thruster_locked = True
    else:
        players[player_index].thruster_locked = False
    ### TESTING
    if KeyIsPressed('k'):
        players[player_index].hp = 0
    # Gravity: accumulate black-hole pull per player, then fold it into
    # the player's polar velocity (speed + heading).
    for q in players:
        sumX = 0
        sumY = 0
        for p in blackhole_list:
            GravDist = distance(q.x, q.y, p.x, p.y)
            GravCheck = collidingCircles(q.x, q.y, 100, p.x, p.y, 1200)
            if GravCheck:
                # NOTE(review): force falls off as 1/r rather than 1/r**2;
                # looks like a deliberate gameplay choice -- confirm.
                F = G*q.mass*p.mass/GravDist
                deex = (q.x) - (p.x)
                deey = (q.y) - (p.y)
                # deex == 0 matches neither branch, so a perfectly vertical
                # alignment applies no force this frame (also avoids the
                # ZeroDivisionError atan(deey/deex) would raise).
                if deex < 0:
                    fdirection = 0+math.degrees(math.atan(deey/deex))
                    sumX += F * math.cos(math.radians(fdirection))
                    sumY -= F * math.sin(math.radians(fdirection))
                if deex > 0:
                    fdirection = 180 + math.degrees(math.atan(deey / deex))
                    sumX += F * math.cos(math.radians(fdirection))
                    sumY -= F * math.sin(math.radians(fdirection))
        # Convert heading/speed to components, apply the pull, convert back.
        yspeed = math.sin(math.radians(q.heading)) * q.speed
        xspeed = math.cos(math.radians(q.heading)) * q.speed
        yspeed += (sumY/q.mass)/1.5
        xspeed += (sumX/q.mass)/1.5
        q.speed = ((yspeed ** 2) + (xspeed ** 2)) ** 0.5
        # xspeed == 0 leaves the heading unchanged (and avoids dividing by
        # a zero q.speed when both components vanish).
        if xspeed < 0:
            q.heading = 180 - math.degrees(math.asin(yspeed/q.speed))
        elif xspeed > 0:
            q.heading = 0 + math.degrees(math.asin(yspeed / q.speed))
    # Check to respawn asteroids: new ones emerge from random black holes.
    if len(asteroid_list) < num_asteroids:
        for i in range(0, num_asteroids-len(asteroid_list)):
            rand_num = random.randint(0, len(blackhole_list)-1)
            asteroid_list.append(Asteroid([blackhole_list[rand_num].x, blackhole_list[rand_num].y], random.randint(0, 200)))
    # Set screen left and screen top corners (camera centred on player).
    (world.screenleft, world.screentop) = (players[player_index].x-world.screenwidth/2, players[player_index].y-world.screenheight/2)
    # Keep the screen left and screen top cornered once the player moves to the edge of the world
    if world.screenleft < 0 - screen_buffer:
        world.screenleft = 0 - screen_buffer
    if world.screentop < 0 - screen_buffer:
        world.screentop = 0 - screen_buffer
    if world.screenleft + world.screenwidth > world.width + screen_buffer:
        world.screenleft = (world.width - world.screenwidth) + screen_buffer
    if world.screentop + world.screenheight > world.height + screen_buffer:
        world.screentop = (world.height - world.screenheight) + screen_buffer
    # Begin Render
    screen.fill((0, 0, 0))
    # Render our stars
    star_field.drawStars(players[player_index])
    # Render our world and its objects
    world.render_objects(screen)
    # Render asteroid
    for asteroid in asteroid_list:
        asteroid.update(dt)
        asteroid.render(screen)
    # Render projectiles
    if len(projectiles) > 0:
        for projectile in projectiles:
            projectile.update()
            projectile.render(screen)
    # Draw border effect (extra small shroud when the spot light is off)
    screen.blit(shroud, (0, 0))
    if not players[player_index].light:
        screen.blit(small_shroud, (0, 0))
    # Draw players
    for player in players:
        player.update()
        if player.thrusting or player.thruster_locked:
            # Flicker: low energy makes the engine glow stutter.
            if player.energy < ship_shutdown:
                if time.time() > player.flicker_time:
                    player.flicker_time = time.time() + flicker_reload
                    screen.blit(player.glow, ((player.x-player.glow.get_width()/2)-world.screenleft, (player.y-player.glow.get_height()/2)-world.screentop))
                else:
                    pass
            # Constant
            else:
                screen.blit(player.glow, ((player.x-player.glow.get_width()/2)-world.screenleft, (player.y-player.glow.get_height()/2)-world.screentop))
        if player.isVisible:
            player.render(screen)
    emitter_stream.set_position((players[player_index].x,players[player_index].y))
    particle_system.update(dt)
    particle_system.draw(world, screen)
    # Render fonts (HUD)
    lives_font = font.render('Lives: '+str(players[player_index].lives), 1, (players[player_index].color))
    score_font = font.render('Score: '+str(players[player_index].score), 1, (players[player_index].color))
    screen.blit(score_font, (850, 10))
    screen.blit(lives_font, (855, 32))
    screen.blit(hp_font, (325, 670))
    if players[player_index].hp < 2:
        screen.blit(low_hp_font, (650, 644))
    screen.blit(energy_font, (490, 670))
    if players[player_index].energy < ship_shutdown:
        screen.blit(low_energy_font, (650, 678))
    # Repeat the low-energy warning sound at most once per second.
    if time.time() > warning_time and players[player_index].energy < ship_shutdown:
        sound_warning.play()
        warning_time = time.time() + 1
    screen.blit(insanity_font, (140, 670))
    if players[player_index].insanity > crew_panicked:
        screen.blit(high_insanity_font, (650, 660))
    # NOTE: the loop variable shadows the module-level `blackhole` class
    # for the rest of the script.
    for blackhole in blackhole_list:
        if distance(blackhole.x, blackhole.y, players[player_index].x, players[player_index].y) < blackhole.size+500:
            screen.blit(blackhole_font, (650, 638))
    # Render radar
    radar.update()
    radar.draw(screen)
    # Update black holes: draw them and teleport any player who falls in.
    if len(blackhole_list) > 0:
        for blackhole in blackhole_list:
            blackhole.draw(screen)
            if distance(blackhole.x, blackhole.y, players[player_index].x, players[player_index].y) < blackhole.size:
                teleport(blackhole)
    # Finish Render Updates
    pygame.display.update()
    clock.tick(fps)
#####~~~~~~~~~~#####
##### End Loop #####
#####~~~~~~~~~~#####
# Credits screen: shown after the game loop ends, dismissed with Escape.
while credits:
    if KeyIsPressed('escape'):
        credits = False
    screen.fill((0, 0, 0))
    screen.blit(credits_pic, (0, 0))
    pygame.display.update()
# Exit the program
# (the original called pygame.display.quit() twice; once is sufficient)
pygame.display.quit()
"Dalton"
] | 93e26458fc56bbb9e9d201f2f458e24ad723c6980c7d70c4b55e297da0e34bcc |
#/*
# *
# * OpenVPN for Kodi.
# *
# * Copyright (C) 2015 Brian Hornsby
# *
# * This program is free software: you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation, either version 3 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program. If not, see <http://www.gnu.org/licenses/>.
# *
# */
import os
import sys
import subprocess
import time
import socket
class OpenVPNManagementInterface:
    """Minimal client for OpenVPN's line-oriented TCP management interface."""

    def __init__(self, ip, port, openvpn=None):
        # `openvpn` is an optional back-reference used only for logging
        # (the log calls are currently commented out).
        self.openvpn = openvpn
        self.ip = ip
        self.port = port
        #self.openvpn._log_debug('OpenVPNManagementInterface: IP: [%s]' % ip)
        #self.openvpn._log_debug(
        #    'OpenVPNManagementInterface: Port: [%s]' % port)
        self.buf = ''
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def connect(self):
        """Open the TCP connection to the management interface."""
        self.sock.connect((self.ip, self.port))

    def disconnect(self):
        """Close the underlying socket."""
        self.sock.close()

    def send(self, msg):
        """Transmit *msg* in full.

        BUG FIX: socket.send() may write only part of the buffer;
        sendall() retries until every byte has been delivered.
        """
        #self.openvpn._log_debug('Sending: [%s]' % msg)
        self.sock.sendall(msg)

    def receive(self):
        """Read one newline-terminated response line and return it.

        A trailing newline is appended to non-empty responses so callers
        can rely on line-terminated output.
        """
        buf = ''
        data = ''
        # NOTE(review): if the peer closes the connection, recv() returns
        # '' forever and this loops indefinitely -- confirm acceptable.
        while data != '\n':
            data = self.sock.recv(1)
            buf += data
        if len(buf) > 0:
            buf += '\n'
        return buf
def is_running(ip, port):
interface = OpenVPNManagementInterface(ip, port)
config = None
state = None
try:
interface.connect()
interface.send('pid\n')
interface.send('state\n')
if interface.receive().startswith('>INFO:'):
data = interface.receive()
if data.startswith('SUCCESS: pid='):
pid = int(data.split('=')[1])
cmdline = 'ps -fp %d' % pid
ps = subprocess.Popen(cmdline, shell=True, stdout=subprocess.PIPE)
cmdline = ps.stdout.read()
ps.stdout.close()
output = cmdline.split('--config')
if len(output) > 1:
config = output[1].lstrip().split('--')[0].rstrip()
else:
cmdline = 'ps | grep -w %d | grep -vw grep' % pid
ps = subprocess.Popen(cmdline, shell=True, stdout=subprocess.PIPE)
cmdline = ps.stdout.read()
ps.stdout.close()
output = cmdline.split('--config')
if len(output) > 1:
config = output[1].lstrip().split('--')[0].rstrip()
data = interface.receive().split(',')
if len(data) > 1:
state = data[1]
except socket.error as exception:
return False, None, None
return True, config, state
def disconnect(ip, port):
    """Ask the OpenVPN daemon listening at (ip, port) to terminate.

    Raises OpenVPNError(3) when the management interface is unreachable.
    """
    iface = OpenVPNManagementInterface(ip, port)
    try:
        iface.connect()
        iface.send('signal SIGTERM\n')
        iface.disconnect()
    except socket.error:
        raise OpenVPNError(3, 'Unable to disconnect OpenVPN')
class OpenVPNError(Exception):
    """Error raised by the OpenVPN wrapper.

    Carries a numeric code (``errno``) and a human-readable message
    (``string``); rendered as ``[errno]: message``.
    """

    def __init__(self, errno, string):
        self.errno = errno
        self.string = string

    def __str__(self):
        return '[{0}]: {1}'.format(self.errno, self.string)
class OpenVPN:
    """Launch and control an OpenVPN daemon through its management interface."""

    def __init__(self, openvpn, ovpnconfig, ip='127.0.0.1', port=1337, sudo=False, sudopwd=None, args=None, timeout=1, debug=False):
        """
        openvpn    -- path to the openvpn executable
        ovpnconfig -- path to the .ovpn configuration file
        ip, port   -- where the management interface listens
        sudo       -- run openvpn through sudo
        sudopwd    -- password piped to ``sudo -S`` (optional)
        args       -- extra command-line arguments appended verbatim
        timeout    -- seconds to wait after starting/stopping the daemon
        debug      -- emit debug logging to stdout
        """
        self.openvpn = openvpn
        self.ovpnconfig = ovpnconfig
        self.ip = ip
        self.port = int(port)
        self.args = args
        self.timeout = timeout
        self.sudo = sudo
        self.sudopwd = sudopwd
        self.debug = debug
        self._log_debug('OpenVPN: [%s]' % self.openvpn)
        self._log_debug('OpenVPN Configuration: [%s]' % self.ovpnconfig)
        self._log_debug('OpenVPN Management IP: [%s]' % self.ip)
        self._log_debug('OpenVPN Management Port: [%d]' % self.port)
        if self.args is not None:
            self._log_debug('Additional Arguments: [%s]' % self.args)
        # Validation only logs; construction still succeeds so callers can
        # inspect the object (matches the original behaviour).
        if self.openvpn is None or not os.path.exists(self.openvpn) or not os.path.isfile(self.openvpn):
            self._log_error('OpenVPN: ERROR: Specified OpenVPN does not exist')
        if self.ovpnconfig is None or not os.path.exists(self.ovpnconfig) or not os.path.isfile(self.ovpnconfig):
            self._log_error(
                'OpenVPN: ERROR: Specified OpenVPN configuration file does not exist')
        self.interface = None
        self.workdir = os.path.dirname(ovpnconfig)
        self.logfile = os.path.join(self.workdir, 'openvpn.log')

    def _log_debug(self, msg):
        """Print *msg* when debug logging is enabled."""
        # BUG FIX: print as a function so the module also parses under
        # Python 3 (output is identical under Python 2).
        if self.debug:
            print('OpenVPN: DEBUG: %s' % msg)

    def _log_error(self, msg):
        """Unconditionally print an error message."""
        print('OpenVPN: ERROR: %s' % msg)

    def connect_to_interface(self, logerror):
        """Attach to the daemon's management interface.

        Returns True on success.  On failure logs the exception (as an
        error when *logerror* is set, otherwise as debug), clears
        ``self.interface`` and returns False.
        """
        if self.interface is None:
            self.interface = OpenVPNManagementInterface(self.ip, self.port, self)
            try:
                self.interface.connect()
            except socket.error as exception:
                if logerror:
                    self._log_error(exception)
                else:
                    self._log_debug(exception)
                self.interface = None
                return False
        return True

    def disconnect(self):
        """Signal the daemon to terminate and drop the management link."""
        # NOTE(review): assumes the management interface is reachable; if
        # connect_to_interface() fails, self.interface is None and the
        # send below raises AttributeError (original behaviour kept).
        self.connect_to_interface(False)
        self._log_debug('Disconnecting OpenVPN')
        self.interface.send('signal SIGTERM\n')
        time.sleep(self.timeout)
        self.interface.disconnect()
        self.interface = None
        self._log_debug('Disconnect OpenVPN successful')

    def connect(self):
        """Start the OpenVPN daemon and attach to its management interface.

        Raises OpenVPNError(1) if a daemon is already running and
        OpenVPNError(2) if the management interface cannot be reached
        after startup.
        """
        self._log_debug('Connecting OpenVPN')
        isrunning = self.connect_to_interface(False)
        if isrunning:
            self._log_debug('OpenVPN is already running')
            self.interface.disconnect()
            raise OpenVPNError(1, 'OpenVPN is already running')
        cmdline = '\'%s\' --cd \'%s\' --daemon --management %s %d --config \'%s\' --log \'%s\'' % (
            self.openvpn, self.workdir, self.ip, self.port, self.ovpnconfig, self.logfile)
        if self.args is not None:
            cmdline = '%s %s' % (cmdline, self.args)
        self._log_debug('Command line: [%s]' % cmdline)
        if self.sudo:
            self._log_debug('Using sudo')
            if self.sudopwd:
                # SECURITY NOTE: echoing the sudo password onto the shell
                # command line exposes it in the process list; consider
                # feeding it through stdin instead.
                cmdline = 'echo \'%s\' | sudo -S %s' % (self.sudopwd, cmdline)
            else:
                cmdline = 'sudo %s' % (cmdline)
        self.process = subprocess.Popen(cmdline, cwd=self.workdir, shell=True,
                                        stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        time.sleep(self.timeout)
        if not self.connect_to_interface(True):
            self._log_debug('Connect OpenVPN failed')
            raise OpenVPNError(
                2, 'Unable to connect to OpenVPN management interface')
        self._log_debug('Connect OpenVPN successful')
| brianhornsby/script.openvpn | resources/lib/openvpn.py | Python | gpl-3.0 | 7,334 | [
"Brian"
] | d56e7d265d60a646c571bcae0074448a29fc1475236bfa8fe49503618eea300d |
#!/bin/env python
# create_meta.py
# Generate the WHAM input files for PyBrella given a complete PyBrella run
from __future__ import division, print_function
from argparse import ArgumentParser, SUPPRESS
from glob import glob
from shared import *
from shutil import rmtree
from stat import S_IEXEC
parser = ArgumentParser(description="create WHAM input files for PyBrella")
parser.add_argument("--force", '-f', help="AMBER force constant (default is 5)", type=float, action="store", default=5)
parser.add_argument("--out", '-o', help="output directory", type=str, action=FullPath, default="wham")
parser.add_argument("--umbrella", '-u', help="PyBrella output directory", type=str, action=FullPath, default="umbrella")
parser.add_argument("--cont", help=SUPPRESS, action="store_true")
args = parser.parse_args()
WORKDIR = os.getcwd()
OUTPATH = args.out
if not os.path.isabs(args.out):
OUTPATH = WORKDIR + "/" + args.out
UMBRELLAPATH = args.umbrella
if not os.path.isabs(args.umbrella):
UMBRELLAPATH = WORKDIR + "/" + args.umbrella
if not args.cont:
try:
rmtree(OUTPATH)
except OSError:
pass
os.mkdir(OUTPATH)
dists = [name.split('/')[-1] for name in glob("%s/*.*" % UMBRELLAPATH)
if os.path.isdir(os.path.join(UMBRELLAPATH, name))]
dists.sort(key=lambda x: [int(y) for y in x.split('.')])
distFileNames = []
minPotentials = []
force = args.force * 2
sys.stderr.write("Processing distances: ")
def processDirectory(d):
sys.stderr.write(d + " ")
runs = [n.split('/')[-1] for n in glob("%s/%s/*" % (UMBRELLAPATH, d))
if os.path.isdir(os.path.join(UMBRELLAPATH, name))]
system("rm '%s/%s/dist_vs_t'" % (UMBRELLAPATH, d))
for runDir in runs:
system("cat '%s/%s/%s/dist_vs_t' >> '%s/%s/dist_vs_t'\n" % (UMBRELLAPATH, d, runDir, UMBRELLAPATH, d))
system("mv %s/%s/dist_vs_t %s/dist_vs_t_%s" % (UMBRELLAPATH, d, OUTPATH, d))
if not args.cont:
parMap(processDirectory, dists, n=(cpu_count() / 2))
sys.stderr.write("\nProcessing file lengths.\n")
maxLength = 0
distVsTs = glob("%s/dist_vs_t*" % OUTPATH)
for i, fp in enumerate(distVsTs):
length = int(system("wc -l " + str(fp)).strip().split()[0])
if length > maxLength:
maxLength = length
sys.stderr.write("\r" + str(int(float(i) / len(distVsTs) * 50)) + "% complete.")
for i, fp in enumerate(distVsTs):
length = int(system("wc -l " + str(fp)).strip().split()[0])
reps = int(maxLength / length)
for _ in xrange(reps):
system("cat %s >> %s_new" % (fp, fp))
system("mv %s_new %s" % (fp, fp))
sys.stderr.write("\r" + str(int(float(i) / len(distVsTs) * 50 + 50)) + "% complete.")
sys.stderr.write("\n")
sys.stderr.write("Creating meta.dat.\n")
for dist in dists:
distFileNames.append("dist_vs_t_" + dist)
distRst = open("%s/%s/0/dist.rst" % (UMBRELLAPATH, dist))
for line in distRst:
minPotentials.append(float(line.split()[4][3:-1]))
os.chdir(OUTPATH)
with open("meta.dat", 'w') as metaDat:
for i in xrange(len(distFileNames)):
metaDat.write("%s %.3f %.14f\n" % (distFileNames[i], minPotentials[i], force))
with open("run.sh", 'w') as runScript:
runScript.write("wham %.3f %.3f 200 0.01 300 0 meta.dat result.dat\n" % (float(dists[0]), float(dists[-1])))
os.chmod("run.sh", os.stat("run.sh").st_mode | S_IEXEC)
os.chdir(WORKDIR)
| charlesyuan314/PyBrella | create_meta.py | Python | gpl-3.0 | 3,354 | [
"Amber"
] | 92b543b5a1e64944485cdfdeeff1e4f242d75a0996c85bf59e23ab45920f651c |
# -*- coding: utf-8 -*-
"""Release data for the IPython project."""
#-----------------------------------------------------------------------------
# Copyright (c) 2008, IPython Development Team.
# Copyright (c) 2001, Fernando Perez <fernando.perez@colorado.edu>
# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Name of the package for release purposes. This is the name which labels
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'ipython'
# IPython version information. An empty _version_extra corresponds to a full
# release. 'dev' as a _version_extra string means this is a development
# version
_version_major = 3
_version_minor = 1
_version_patch = 0
# _version_extra = 'dev'
# _version_extra = 'rc1'
_version_extra = '' # Uncomment this for full releases
# release.codename is deprecated in 2.0, will be removed in 3.0
codename = ''
# Construct full version string from these.
_ver = [_version_major, _version_minor, _version_patch]
__version__ = '.'.join(map(str, _ver))
if _version_extra:
__version__ = __version__ + '-' + _version_extra
version = __version__ # backwards compatibility name
version_info = (_version_major, _version_minor, _version_patch, _version_extra)
# Change this when incrementing the kernel protocol version
kernel_protocol_version_info = (5, 0)
kernel_protocol_version = "%i.%i" % kernel_protocol_version_info
description = "IPython: Productive Interactive Computing"
long_description = \
"""
IPython provides a rich toolkit to help you make the most out of using Python
interactively. Its main components are:
* Powerful interactive Python shells (terminal- and Qt-based).
* A web-based interactive notebook environment with all shell features plus
support for embedded figures, animations and rich media.
* Support for interactive data visualization and use of GUI toolkits.
* Flexible, embeddable interpreters to load into your own projects.
* A high-performance library for high level and interactive parallel computing
that works in multicore systems, clusters, supercomputing and cloud scenarios.
The enhanced interactive Python shells have the following main features:
* Comprehensive object introspection.
* Input history, persistent across sessions.
* Caching of output results during a session with automatically generated
references.
* Extensible tab completion, with support by default for completion of python
variables and keywords, filenames and function keywords.
* Extensible system of 'magic' commands for controlling the environment and
performing many tasks related either to IPython or the operating system.
* A rich configuration system with easy switching between different setups
(simpler than changing $PYTHONSTARTUP environment variables every time).
* Session logging and reloading.
* Extensible syntax processing for special purpose situations.
* Access to the system shell with user-extensible alias system.
* Easily embeddable in other Python programs and GUIs.
* Integrated access to the pdb debugger and the Python profiler.
The parallel computing architecture has the following main features:
* Quickly parallelize Python code from an interactive Python/IPython session.
* A flexible and dynamic process model that be deployed on anything from
multicore workstations to supercomputers.
* An architecture that supports many different styles of parallelism, from
message passing to task farming.
* Both blocking and fully asynchronous interfaces.
* High level APIs that enable many things to be parallelized in a few lines
of code.
* Share live parallel jobs with other users securely.
* Dynamically load balanced task farming system.
* Robust error handling in parallel code.
The latest development version is always available from IPython's `GitHub
site <http://github.com/ipython>`_.
"""
license = 'BSD'
authors = {'Fernando' : ('Fernando Perez','fperez.net@gmail.com'),
'Janko' : ('Janko Hauser','jhauser@zscout.de'),
'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
'Ville' : ('Ville Vainio','vivainio@gmail.com'),
'Brian' : ('Brian E Granger', 'ellisonbg@gmail.com'),
'Min' : ('Min Ragan-Kelley', 'benjaminrk@gmail.com'),
'Thomas' : ('Thomas A. Kluyver', 'takowl@gmail.com'),
'Jorgen' : ('Jorgen Stenarson', 'jorgen.stenarson@bostream.nu'),
'Matthias' : ('Matthias Bussonnier', 'bussonniermatthias@gmail.com'),
}
author = 'The IPython Development Team'
author_email = 'ipython-dev@scipy.org'
url = 'http://ipython.org'
download_url = 'https://github.com/ipython/ipython/downloads'
platforms = ['Linux','Mac OSX','Windows XP/Vista/7/8']
keywords = ['Interactive','Interpreter','Shell','Parallel','Distributed',
'Web-based computing', 'Qt console', 'Embedding']
classifiers = [
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: System :: Distributed Computing',
'Topic :: System :: Shells'
]
| madelynfreed/rlundo | venv/lib/python2.7/site-packages/IPython/core/release.py | Python | gpl-3.0 | 5,560 | [
"Brian"
] | 443150d60955aee889838c95d923c58e9248fa194705c6a00afbe35d8f75f364 |
import logging
from celery.task import task
from django.conf import settings
from django.utils import timezone
from myvoice.clinics.models import Visit
from . import importer, utils as survey_utils
from .models import Survey
from .textit import TextItApi, TextItException
logger = logging.getLogger(__name__)
def _get_survey_start_time(tm):
# Schedule the survey to be sent in the future.
eta = tm + settings.DEFAULT_SURVEY_DELAY
earliest, latest = settings.SURVEY_TIME_WINDOW
if eta.hour > latest: # It's too late in the day - send tomorrow.
eta = eta + timezone.timedelta(days=1)
eta = eta.replace(hour=earliest, minute=0, second=0, microsecond=0)
elif eta.hour < earliest: # It's too early in the day - send later.
eta = eta.replace(hour=earliest, minute=0, second=0, microsecond=0)
return eta
@task
def import_responses():
"""Periodically check for new responses for each survey."""
logger.debug('Importing responses from active surveys.')
for survey in Survey.objects.active():
logger.debug('Starting to import responses for flow {0}.'.format(survey.flow_id))
importer.import_responses(survey.flow_id)
logger.debug('Finished importing responses for flow {0}.'.format(survey.flow_id))
@task
def start_feedback_survey(visit_pk):
"""Initiate the patient feedback survey for a Visit."""
try:
survey = Survey.objects.get(role=Survey.PATIENT_FEEDBACK)
except Survey.DoesNotExist:
logger.exception("No patient feedback survey is registered.")
raise
try:
visit = Visit.objects.get(pk=visit_pk)
except Visit.DoesNotExist:
logger.exception("Unable to find visit with pk {}.".format(visit_pk))
raise
if visit.survey_sent is not None:
logger.warning("Survey has already been sent for visit {}.".format(visit_pk))
return
try:
TextItApi().start_flow(survey.flow_id, visit.mobile)
except TextItException:
logger.exception("Error sending survey for visit {}.".format(visit.pk))
raise
else:
visit.survey_sent = timezone.now()
visit.save()
logger.debug("Initiated survey for visit {} "
"at {}.".format(visit.pk, visit.survey_sent))
@task
def handle_new_visits():
"""
Schedule when feedback survey should start for all new visitors.
Except for blocked visitors.
"""
blocked = Visit.objects.exclude(sender='').values_list('sender', flat=True).distinct()
try:
# Look for visits for which we haven't sent surveys.
# We use welcome_sent to show that we have not scheduled surveys
# We can't use survey_sent because they are async and we may experience
# overlaps.
visits = Visit.objects.filter(welcome_sent__isnull=True,
mobile__isnull=False).exclude(mobile__in=blocked)
# Grab the phone numbers of all patients from applicable visits.
new_visits = []
phones = []
for visit in visits:
# Only schedule the survey to be started if the phone number
# can be converted to valid international format.
international = survey_utils.convert_to_international_format(visit.mobile)
if international:
new_visits.append(visit)
phones.append(international)
else:
logger.debug("Unable to send welcome message to "
"visit {}.".format(visit.pk))
if not new_visits:
# Don't bother continuing if there aren't any new visits.
return
# Schedule when to initiate the flow.
eta = _get_survey_start_time(timezone.now())
for visit in new_visits:
if visit.survey_sent is not None:
logger.debug("Somehow a survey has already been sent for "
"visit {}.".format(visit.pk))
continue
start_feedback_survey.apply_async(args=[visit.pk], eta=eta)
logger.debug("Scheduled survey to start for visit "
"{} at {}.".format(visit.pk, eta))
# update visits at the end, since adding a value for welcome_sent prevents
# us from finding the values we were originally interested in
welcomed_ids = [v.pk for v in new_visits]
# We update welcome_sent even though we don't send any welcome msg.
Visit.objects.filter(pk__in=welcomed_ids).update(welcome_sent=timezone.now())
except:
logger.exception("Encountered unexpected error while handling new visits.")
raise
| myvoice-nigeria/myvoice | myvoice/survey/tasks.py | Python | bsd-2-clause | 4,682 | [
"VisIt"
] | a907cc1ffe4d8a639e48d543a8a5d12dc6d48bb552cce1366c734146f478c2a2 |
from django.shortcuts import render
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from rango.models import Category, Page, UserProfile
from rango.forms import CategoryForm, PageForm, UserProfileForm
from datetime import datetime
from rango.webhose_search import run_query
from registration.backends.simple.views import RegistrationView
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import authenticate, login
# Create your views here.
def visitor_cookie_handler(request):
# Get the number of visits to the site.
# We use the COOKIES.get() function to obtain the visits cookie.
# If the cookie exists, the value returned is casted to an integer.
# If the cookie doesn't exist, then the default value of 1 is used.
visits = int(request.session.get('visits', '1'))
last_visit_cookie = request.session.get('last_visit', str(datetime.now()))
last_visit_time = datetime.strptime(last_visit_cookie[:-7], "%Y-%m-%d %H:%M:%S")
#last_visit_time = datetime.now()
# If it's been more than a day since the last visit...
if (datetime.now() - last_visit_time).seconds > 0:
visits = visits + 1
#update the last visit cookie now that we have updated the count
request.session['last_visit'] = str(datetime.now())
else:
visits = 1
# set the last visit cookie
request.session['last_visit'] = last_visit_cookie
# update/set the visits cookie
request.session['visits'] = visits
def index(request):
#context_dict = {'boldmessage': "Crunchie, creamy, cookie, candy, cupcake!"}
request.session.set_test_cookie()
category_list = Category.objects.order_by('-likes')[:5]
page_list = Page.objects.order_by('-views')[:5]
context_dict = {'categories': category_list, 'pages': page_list}
visitor_cookie_handler(request)
context_dict['visits'] = request.session['visits']
print(request.session['visits'])
response = render(request, 'rango/index.html', context=context_dict)
return response
def about(request):
if request.session.test_cookie_worked():
print("TEST COOKIE WORKED!")
request.session.delete_test_cookie()
# To complete the exercise in chapter 4, we need to remove the following line
# return HttpResponse("Rango says here is the about page. <a href='/rango/'>View index page</a>")
# and replace it with a pointer to ther about.html template using the render method
return render(request, 'rango/about.html',{})
def show_category(request, category_name_slug):
# Create a context dictionary which we can pass
# to the template rendering engine.
context_dict = {}
try:
# Can we find a category name slug with the given name?
# If we can't, the .get() method raises a DoesNotExist exception.
# So the .get() method returns one model instance or raises an exception.
category = Category.objects.get(slug=category_name_slug)
# Retrieve all of the associated pages.
# Note that filter() returns a list of page objects or an empty list
pages = Page.objects.filter(category=category)
# Adds our results list to the template context under name pages.
context_dict['pages'] = pages
# We also add the category object from
# the database to the context dictionary.
# We'll use this in the template to verify that the category exists.
context_dict['category'] = category
# We get here if we didn't find the specified category.
# Don't do anything -
# the template will display the "no category" message for us.
except Category.DoesNotExist:
context_dict['category'] = None
context_dict['pages'] = None
# create a default query based on the category name
# to be shown in the search box
context_dict['query'] = category.name
result_list = []
if request.method == 'POST':
query = request.POST['query'].strip()
if query:
# Run our Webhose function to get the results list!
result_list = run_query(query)
context_dict['query'] = query
context_dict['result_list'] = result_list
# Go render the response and return it to the client.
return render(request, 'rango/category.html', context_dict)
def add_category(request):
form = CategoryForm()
# A HTTP POST?
if request.method == 'POST':
form = CategoryForm(request.POST)
# Have we been provided with a valid form?
if form.is_valid():
# Save the new category to the database.
category = form.save(commit=True)
print(category, category.slug)
# Now that the category is saved
# We could give a confirmation message
# But instead since the most recent catergory added is on the index page
# Then we can direct the user back to the index page.
return index(request)
else:
# The supplied form contained errors - just print them to the terminal.
print(form.errors)
# Will handle the bad form (or form details), new form or no form supplied cases.
# Render the form with error messages (if any).
return render(request, 'rango/add_category.html', {'form': form})
def add_page(request, category_name_slug):
try:
category = Category.objects.get(slug=category_name_slug)
except Category.DoesNotExist:
category = None
form = PageForm()
if request.method == 'POST':
form = PageForm(request.POST)
if form.is_valid():
if category:
page = form.save(commit=False)
page.category = category
page.views = 0
page.save()
# probably better to use a redirect here.
return show_category(request, category_name_slug)
else:
print(form.errors)
context_dict = {'form':form, 'category': category}
return render(request, 'rango/add_page.html', context_dict)
def search(request):
result_list = []
if request.method == 'POST':
query = request.POST['query'].strip()
if query:
# Run our Webhose function to get the results list!
result_list = run_query(query)
return render(request, 'rango/search.html', {'result_list': result_list})
def register(request):
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
profile.save()
registered = True
else:
print(user_form.errors, profile_form.errors)
else:
## ON the PDF of tangowithdjango19,the e.g is like that:
# else:
# print(user_form.errors, profile_form.errors)
# else:
# user_form = UserForm()
# profile_form = UserProfileForm()
user_form = UserForm()
profile_form = UserProfileForm()
return render(request,
'rango/register.html',
{'user_form': user_form,
'profile_form': profile_form,
'registered': registered
})
def track_url(request):
page_id = None
if request.method == 'GET':
if 'page_id' in request.GET:
page_id = request.GET['page_id']
if page_id:
try:
page = Page.objects.get(id=page_id)
page.views = page.views + 1
page.save()
return redirect(page.url)
except:
return HttpResponse("Page id {0} not found".format(page_id))
print("No page_id in get string")
return redirect(reverse('index'))
@login_required
def register_profile(request):
form = UserProfileForm()
if request.method == 'POST':
form = UserProfileForm(request.POST, request.FILES)
if form.is_valid():
user_profile = form.save(commit=False)
user_profile.user = request.user
user_profile.save()
return redirect('index')
else:
print(form.errors)
context_dict = {'form':form}
return render(request, 'rango/profile_registration.html', context_dict)
class RangoRegistrationView(RegistrationView):
def get_success_url(self, user):
return reverse('register_profile')
@login_required
def profile(request, username):
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return redirect('index')
userprofile = UserProfile.objects.get_or_create(user=user)[0]
form = UserProfileForm({'website': userprofile.website, 'picture': userprofile.picture})
if request.method == 'POST':
form = UserProfileForm(request.POST, request.FILES, instance=userprofile)
if form.is_valid():
form.save(commit=True)
return redirect('profile', user.username)
else:
print(form.errors)
return render(request, 'rango/profile.html', {'userprofile': userprofile, 'selecteduser': user, 'form': form})
@login_required
def list_profiles(request):
# user_list = User.objects.all()
userprofile_list = UserProfile.objects.all()
return render(request, 'rango/list_profiles.html', { 'userprofile_list' : userprofile_list})
| jxltom/tango_with_django_19 | code/tango_with_django_project/rango/views.py | Python | apache-2.0 | 10,052 | [
"VisIt"
] | 6afd1d4b06c6642166365117336064c2bda364cef7e98c21e4bbab055bc9e4eb |
import numpy as np
from ase.structure import molecule
from ase.constraints import FixedPlane
from ase.optimize import QuasiNewton
from gpaw import GPAW, FermiDirac
from gpaw.test import equal
#----------------------------------
# Initialization
molname = 'benzene-mol'
dimername = 'benzene-dimer'
f = open('benzene-dimer-T-shape.dat', 'w')
h = 0.18
xc = 'vdW-DF'
#-------------------------------------
# relaxation of the benzene molecule
benz = molecule('C6H6')
benz.set_pbc(False)
tags = np.zeros_like(benz)
benz.set_tags(tags)
benz.center(vacuum=4.0)
cell = benz.get_cell()
calc = GPAW(nbands=-1,
h=h,
xc=xc,
occupations=FermiDirac(0.0),
txt=molname+'_relax.txt')
benz.set_calculator(calc)
# qn constraint
for i in range(len(benz)):
plane = FixedPlane(i, (0, 0, 1))
benz.set_constraint(plane)
qn = QuasiNewton(benz,
logfile=molname + '_relax.log',
trajectory=molname + '_relax.traj')
qn.run(fmax=0.01)
e_mol = benz.get_potential_energy()
del calc
#-------------------------------------
# mapping out the benzene dimer (T-shaped) intermolecular distance
e_array = np.zeros(20)
d_array = np.zeros(20)
k = 0
for i in np.linspace(-6, 6, 20):
k_str = str(k)
z = 6.0 + i * 0.3
BENZ = benz.copy()
dimer = BENZ.copy()
tags = np.ones_like(dimer)
dimer.set_tags(tags)
BENZ.rotate('x', np.pi / 2, center='COM')
BENZ.translate([0, 0, z])
dimer.extend(BENZ)
dimer.set_cell([cell[0, 0], cell[1, 1], cell[2, 2] + 8])
dimer.center()
dimer.set_pbc(False)
pos = dimer.get_positions()
d = pos[21, 2] - pos[0, 2]
calc = GPAW(nbands=-2,
h=h,
xc=xc,
occupations=FermiDirac(0.0),
txt=dimername + '_' + k_str + '.txt')
dimer.set_calculator(calc)
e_dimer = dimer.get_potential_energy()
del calc
# interaction energy
e_int = e_dimer - 2 * e_mol
e_array[k] = e_int
d_array[k] = d
print >> f, str(round(d, 3)), e_int
f.flush()
k += 1
# E_int-curve minimum
e_0 = 100.
d_0 = 0.
for i in range(len(e_array)):
if e_array[i] < e_0:
e_0 = e_array[i]
d_0 = d_array[i]
print >> f, '****************'
print >> f, 'Minimum (E_int,d):', e_0, d_0
f.close()
equal(e_0 , -0.11, 0.01)
equal(d_0 , 2.86, 0.05)
| robwarm/gpaw-symm | gpaw/test/big/vdw/benzene-dimer-T-shaped.py | Python | gpl-3.0 | 2,370 | [
"ASE",
"GPAW"
] | 55c52833d5f67ed4faec5b0782a96a7cdd583808ddc97737faf7d2b7458e0dd9 |
#import sys
import numpy as np
from numpy import array as npa
import matplotlib.pyplot as plt
#from matplotlib.collections import LineCollection
#from matplotlib.gridspec import GridSpec
import pymatgen as mg
from pymatgen.io.vasp.outputs import Vasprun, Procar
from pymatgen.symmetry.bandstructure import HighSymmKpath
from pymatgen.electronic_structure.core import Spin, Orbital
#bands = Vasprun("./vasprun.xml").get_band_structure("./KPOINTS", line_mode=True)
dosrun = Vasprun("./vasprun.xml")
spd_dos = dosrun.complete_dos.get_spd_dos()
run = Vasprun("./vasprun.xml", parse_projected_eigen=True)
bands = run.get_band_structure("./KPOINTS", line_mode=True, efermi=dosrun.efermi)
data = Procar("./PROCAR").data
data = data[Spin.up].sum(axis=2)
klist = [[i] for i in range(250)]
name = [None]*3
name[0] = "As"
name[1] = "Ge"
name[2] = "Cd"
##pbands = bands.get_projections_on_elts_and_orbitals({name: ["s", "p", "d"]})
pbands0 = bands.get_projections_on_elements_and_orbitals({name[0]: ["s", "p"]})
pbands1 = bands.get_projections_on_elements_and_orbitals({name[1]: ["s", "p"]})
pbands2 = bands.get_projections_on_elements_and_orbitals({name[2]: ["s", "p"]})
# compute s, p, d normalized contributions
contrib = np.zeros((3, bands.nb_bands, len(bands.kpoints), 2))
for b in range(bands.nb_bands):
for k in range(len(bands.kpoints)):
sc0 = pbands0[Spin.up][b][k][name[0]]["s"]**2
pc0 = pbands0[Spin.up][b][k][name[0]]["p"]**2
tot0 = sc0 + pc0
if tot0 != 0.0:
contrib[0, b, k, 0] = sc0 / tot0
contrib[0, b, k, 1] = pc0 / tot0
sc1 = pbands1[Spin.up][b][k][name[1]]["s"]**2
pc1 = pbands1[Spin.up][b][k][name[1]]["p"]**2
tot1 = sc1 + pc1
if tot1 != 0.0:
contrib[1, b, k, 0] = sc1 / tot1
contrib[1, b, k, 1] = pc1 / tot1
sc2 = pbands2[Spin.up][b][k][name[2]]["s"]**2
pc2 = pbands2[Spin.up][b][k][name[2]]["p"]**2
tot2 = sc2 + pc2
if tot2 != 0.0:
contrib[2, b, k, 0] = sc2 / tot2
contrib[2, b, k, 1] = pc2 / tot2
markers = ("o", "^", "s", "h", "D", "*")
colorlist = ("r", "g", "b", "m", "c", "y")
for j in range(72):
ek = zip(range(len(bands.kpoints)), [e - bands.efermi for e in bands.bands[Spin.up][j]])
orbital_contribution = zip ([contrib[0,j,k,0] for k in range (250)], [contrib[0,j,k,1] for k in range (250)])
if contrib[0,j,k,0] > contrib[0,j,k,1]:
marker = "o" ; c = "y" # s orbital dominating
else:
marker = "^" ; c = "m" # p orbital dominating
plt.scatter(*zip(*ek), s = 100, marker = marker, c=c, alpha=0.5)
# for k in range(len(bands.kpoints)):
# if contrib[1,j,k,0] > contrib[1,j,k,1]:
# marker = "s" ; c = "y" # s orbital dominating
# else:
# marker = "*" ; c = "m"
# plt.scatter((k,bands.bands[Spin.up][j]-bands.efermi), s = 100, marker = marker, c=c, alpha=0.5)
# for k in range(len(bands.kpoints)):
# if contrib[2,j,k,0] > contrib[2,j,k,1]:
# marker = "s" ; c = "y" # s orbital dominating
# else:
# marker = "*" ; c = "m"
# plt.scatter((k,bands.bands[Spin.up][j]-bands.efermi), s = 100, marker = marker, c=c, alpha=0.5)
# plt.scatter(*zip(*ek), s = 100, marker = marker, c=c, alpha=0.5)
plt.show() | neelravi/vasp | sample.py | Python | gpl-3.0 | 3,223 | [
"VASP",
"pymatgen"
] | 821551654bad7c7914550bec2404878c9fc7fee61d4798d77b5034aebe97a59b |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
"""
Transform an Excel file produced by ASEBA scoring to an NCANDA release format.
```bash
# In this example, the xlsx files are names ASR_Scored_2018-08.xlsx, etc.
ASEBA_FOLDER=/fs/ncanda-share/beta/simon/aseba_082018
for form in asr ysr cbc; do
./aseba_reformat.py --form $form \
--input ${ASEBA_FOLDER}/${form^^}_Scored_2018-08.xlsx \
--output ${ASEBA_FOLDER}/${form}_scored.csv
done
```
"""
import sys
import pandas
import argparse
from aseba_form import get_aseba_form
def parse_args(input_args=None):
parser = argparse.ArgumentParser(
description="Reformat the output of ASEBA scoring for a particular form.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('-i', '--input', help="xlsx filename to process.",
action="store", required=True)
parser.add_argument('-o', '--output', help="CSV file to write output to.",
action="store", default=sys.stdout)
parser.add_argument('-f', '--form',
choices=["asr", "ysr", "cbc"],
help="ASEBA form to extract the raw values for",
required=True)
parser.add_argument('-v', '--verbose',
help="Print diagnostic info to stdout",
action="store_true")
return parser.parse_args(input_args)
if __name__ == "__main__":
args = parse_args()
data = pandas.read_excel(args.input, sheet_name=0) # return first sheet
# Rename the columns per definition in aseba_form and only keep those columns
form_specifics = get_aseba_form(args.form)
dict_renames = form_specifics.post_score_renames
data = data.rename(columns=dict_renames)
actual_columns = [x for x in dict_renames.values() if x in data.columns]
missed_columns = [(k, v) for k, v in dict_renames.items()
if v not in data.columns]
dropped_columns = [x for x in data.columns if x not in actual_columns]
if args.verbose:
print("Missed columns: ", missed_columns)
print("Unmatched (and therefore dropped) columns: ", dropped_columns)
data = data.loc[:, actual_columns]
# Modify the metadata columns
if 'arm' in data.columns:
data.loc[data['arm'].isnull(), 'arm'] = 'standard'
else:
data['arm'] = 'standard'
if data['visit'].str.contains('_visit_arm_1', na=False).any():
data['visit'] = (data['visit']
.str.replace('_visit_arm_1', '')
.str.replace(r'^(\dy)$', r'followup_\1'))
# Cheap way to ensure that these three columns come first
index_fields = ['subject', 'arm', 'visit']
data.set_index(index_fields, inplace=True)
data.sort_index(inplace=True)
try:
data.to_csv(args.output, index=True)
except IOError: # e.g. when writing to stdout that's suddenly closed
pass
| sibis-platform/ncanda-datacore | scripts/reporting/aseba_reformat.py | Python | bsd-3-clause | 3,090 | [
"VisIt"
] | 458029bef58263bdd29f4b6fb212abc1cab73ab0d59cb852561e322140bcdf7d |
__author__ = 'bptripp'
import os
import csv
import numpy as np
from itertools import islice
from depthmap import *
from PIL import Image
import scipy
import scipy.misc
from depthmap import loadOBJ, Display
from heuristic import calculate_metric_map
import cPickle
import matplotlib.pyplot as plt
# class GraspDataSource(object):
#
# def __init__(self, csv_file_name, obj_directory_name, range=None, imsize=(50,50)):
# self.obj_directory_name = obj_directory_name
# self.imsize = imsize
#
# self.objfiles = []
# self.orientations = []
# self.positions = []
# self.success = []
#
# with open(csv_file_name, 'rb') as csvfile:
# r = csv.reader(csvfile, delimiter=',')
# for row in islice(r, 1, None):
# self.objfiles.append(obj_directory_name + os.path.sep + row[0])
# self.orientations.append([float(row[1]), float(row[2]), float(row[3])])
# self.positions.append([float(row[4]), float(row[5]), float(row[6])])
# self.success.append(float(row[8]))
#
# if range is not None:
# self.objfiles = self.objfiles[range[0]:range[1]]
# self.orientations = self.orientations[range[0]:range[1]]
# self.positions = self.positions[range[0]:range[1]]
# self.success = self.success[range[0]:range[1]]
#
# self.display = Display(imsize=imsize)
#
#
# def get_XY(self, n):
# ind = np.random.randint(0, len(self.objfiles), n)
# X = np.zeros((n, 1, self.imsize[0], self.imsize[1]))
# Y = np.zeros(n)
# for i in range(n):
# # print(self.objfiles[ind[i]])
# verts, faces = loadOBJ(self.objfiles[ind[i]])
# new_verts = move_vertices(self.positions[ind[i]], self.orientations[ind[i]], verts)
# self.display.set_mesh(new_verts, faces)
# X[i,0,:,:] = self.display.read_depth()
# Y[i] = self.success[ind[i]]
# return np.array(X), np.array(Y)
# class AshleyDataSource(object):
# def __init__(self):
# self.X = np.zeros((1000,1,28,28))
# for i in range(1000):
# # filename = '25_mug-02-Feb-2016-12-40-43.obj131001'
# filename = '../data/imgs/25_mug-02-Feb-2016-12-40-43.obj13' + str(1001+i) + '.png'
# im = Image.open(filename)
# self.X[i,0,:,:] = np.array(im.getdata()).reshape((28,28))
#
# for line in open('../data/labels.csv', "r"):
# vals = line.split(',')
# self.Y = map(int, vals)
#
# # normalize input
# self.X = (self.X - np.mean(self.X.flatten())) / np.std(self.X.flatten())
# def make_depth_from_gripper(obj_filename, param_filename, bottom=0.2):
# """
# Make depth images from perspective of gripper.
# """
# verts, faces = loadOBJ(obj_filename)
# verts = np.array(verts)
# min_bounding_box = np.min(verts, axis=0)
# max_bounding_box = np.max(verts, axis=0)
#
# # set bounding box horizontal centre to 0,0
# verts[:,0] = verts[:,0] - (min_bounding_box[0]+max_bounding_box[0])/2.
# verts[:,1] = verts[:,1] - (min_bounding_box[1]+max_bounding_box[1])/2.
# # set bottom of bounding box to "bottom"
# verts[:,2] = verts[:,2] + bottom - min_bounding_box[2]
#
# d = Display(imsize=(80,80))
#
# labels = []
# depths = []
# c = 0
# for line in open(param_filename, "r"):
# # print(c)
# # if c == 100:
# # break
# c = c + 1
#
# vals = line.split(',')
# gripper_pos = [float(vals[0]), float(vals[1]), float(vals[2])]
# gripper_orient = [float(vals[3]), float(vals[4]), float(vals[5])]
# rot = rot_matrix(gripper_orient[0], gripper_orient[1], gripper_orient[2])
# labels.append(int(vals[6]))
#
# d.set_camera_position(gripper_pos, rot, .4)
# d.set_mesh(verts, faces) #this mut go after set_camera_position
# depth = d.read_depth()
# depths.append(depth)
#
# d.close()
# return np.array(depths), np.array(labels)
def load_all_params(param_filename, return_power_pinch=False):
    """
    Load grasp parameters for all objects from a CSV file, skipping shapes on a
    manually curated bad list.

    Example line from file:
    "104_toaster_final-18-Dec-2015-13-56-59.obj",2.99894,0.034299705,0.4714164,0.09123467,0.0384472,0.5518384,0.0880979987086634,0.0

    :param param_filename: Path to the CSV of grasp parameters (one grasp per line)
    :param return_power_pinch: If True, also return the power/pinch metric column
    :return: (objects, gripper_pos, gripper_orient, labels) and, if requested,
        power_pinch as a fifth element; lists are parallel, one entry per grasp
    """
    # Shapes excluded from the dataset; matched against the object file name
    # with the '.obj' extension stripped.
    bad = [
        # Ashley says these are bad after looking through V-REP images ...
        '24_bowl-24-Feb-2016-17-38-53',
        '24_bowl-26-Feb-2016-08-35-29',
        '24_bowl-27-Feb-2016-23-52-43',
        '24_bowl-29-Feb-2016-15-01-53',
        '25_mug-11-Feb-2016-02-25-25',
        '28_Spatula_final-10-Mar-2016-18-31-08',
        '42_wineglass_final-01-Nov-2015-19-25-18',
        # These somehow have two objects in V-REP images ...
        '24_bowl-02-Mar-2016-07-03-29',
        '24_bowl-03-Mar-2016-22-54-50',
        '24_bowl-05-Mar-2016-13-53-41',
        '24_bowl-07-Mar-2016-05-06-04',
        # These ones may fall over a bit at simulation start (first has off depth maps, others don't) ...
        '55_hairdryer_final-18-Nov-2015-13-57-47',
        '55_hairdryer_final-15-Dec-2015-12-18-19',
        '55_hairdryer_final-09-Dec-2015-09-54-47',
        '55_hairdryer_final-19-Nov-2015-09-56-56',
        '55_hairdryer_final-21-Nov-2015-05-16-08',
        # These frequently do not have object at centre of depth map (various reasons possible) ...
        '33_pan_final-11-Mar-2016-17-41-49',
        '53_watertap_final-04-Dec-2015-01-28-24',
        '53_watertap_final-06-Dec-2015-04-20-45',
        '53_watertap_final-15-Nov-2015-05-08-46',
        '53_watertap_final-17-Nov-2015-00-26-39',
        '53_watertap_final-17-Nov-2015-15-57-57',
        '53_watertap_final-19-Jan-2016-04-32-52',
        '56_headphones_final-11-Nov-2015-14-14-02',
        '64_tongs_final-02-Dec-2015-12-22-36',
        '68_toy_final-05-Dec-2015-03-00-07',
        '68_toy_final-13-Nov-2015-10-50-34',
        '68_toy_final-18-Dec-2015-12-36-41',
        '68_toy_final-22-Nov-2015-08-51-12',
        '76_mirror_final-06-Dec-2015-03-46-18',
        '77_napkinholder_final-28-Nov-2015-13-06-17',
        '79_toy_dog_final-03-Dec-2015-08-15-04',
        '79_toy_dog_final-20-Jan-2016-06-55-00',
        '92_shell_final-26-Feb-2016-17-48-04',
        '94_weight_final-27-Feb-2016-15-40-40',
        '94_weight_final-29-Feb-2016-17-59-42',
        '95_boots_final-01-Mar-2016-16-02-15',
        '95_boots_final-01-Mar-2016-16-07-50',
        '95_boots_final-02-Mar-2016-13-46-24',
        '95_boots_final-02-Mar-2016-13-56-54',
        '95_boots_final-15-Nov-2015-06-30-07',
        '95_boots_final-20-Nov-2015-09-23-39',
        '95_boots_final-21-Nov-2015-04-00-35',
        '95_boots_final-23-Dec-2015-15-28-51',
        '95_boots_final-28-Feb-2016-18-58-13',
        '95_boots_final-28-Feb-2016-18-58-15',
        '98_faucet_final-28-Feb-2016-18-32-04',
        '98_faucet_final-28-Feb-2016-18-58-23',
        '98_faucet_final-28-Feb-2016-18-58-25'
    ]

    objects = []
    gripper_pos = []
    gripper_orient = []
    labels = []
    power_pinch = []

    skip_count = 0
    # 'with' closes the file handle (the original iterated an unclosed open()).
    with open(param_filename, 'r') as param_file:
        for line in param_file:
            # Strip quote characters and newlines. str.replace is used instead of
            # the Python-2-only str.translate(None, chars) form, so this works on
            # both Python 2 and 3.
            vals = line.replace('"', '').replace('\n', '').split(',')
            if vals[0] == 'objfilename':
                pass  # header row
            elif vals[0][:-4] in bad:
                skip_count += 1
            else:
                objects.append(vals[0])
                gripper_orient.append([float(vals[1]), float(vals[2]), float(vals[3])])
                gripper_pos.append([float(vals[4]), float(vals[5]), float(vals[6])])
                labels.append(int(float(vals[8])))
                power_pinch.append(float(vals[7]))

    print('Skipped ' + str(skip_count) + '; returning ' + str(len(objects)))

    if return_power_pinch:
        return objects, gripper_pos, gripper_orient, labels, power_pinch
    else:
        return objects, gripper_pos, gripper_orient, labels
def make_depth_images(obj_name, pos, rot, obj_dir, image_dir, bottom=0.2, imsize=(80,80),
                      camera_offset=.45, near_clip=.25, far_clip=.8, support=False):
    """
    Saves depth images from perspective of gripper as image files. Default
    camera parameters make an exaggerated representation of region in front of hand.

    :param obj_name: Name corresponding to .obj file (without path or extension)
    :param pos: Positions of perspectives from which to make depth images
    :param rot: Rotation matrices of perspectives
    :param obj_dir: Directory where .obj files can be found
    :param image_dir: Directory in which to store images
    :param bottom: Height at which the bottom of the object's bounding box is placed
    :param imsize: (width, height) of rendered images in pixels
    :param camera_offset: Distance from camera to the gripper position
    :param near_clip: Near clip distance of the rendering frustum
    :param far_clip: Far clip distance of the rendering frustum
    :param support: If True, render the shared support box instead of the object
    """
    obj_filename = obj_dir + obj_name + '.obj'

    if support:
        # Render the common support box in place of the object mesh.
        verts, faces = loadOBJ('../data/support-box.obj')
    else:
        verts, faces = loadOBJ(obj_filename)
    verts = np.array(verts)

    # minz = np.min(verts, axis=0)[2]
    # verts[:,2] = verts[:,2] + bottom - minz

    min_bounding_box = np.min(verts, axis=0)
    max_bounding_box = np.max(verts, axis=0)

    # set bounding box horizontal centre to 0,0
    verts[:,0] = verts[:,0] - (min_bounding_box[0]+max_bounding_box[0])/2.
    verts[:,1] = verts[:,1] - (min_bounding_box[1]+max_bounding_box[1])/2.

    # set bottom of bounding box to "bottom"
    verts[:,2] = verts[:,2] + bottom - min_bounding_box[2]

    d = Display(imsize=imsize)
    d.set_perspective(fov=45, near_clip=near_clip, far_clip=far_clip)
    for i in range(len(pos)):
        d.set_camera_position(pos[i], rot[i], camera_offset)
        d.set_mesh(verts, faces) #this must go after set_camera_position
        depth = d.read_depth()
        distance = get_distance(depth, near_clip, far_clip)
        # Map distances in [camera_offset, far_clip] to [0, 1]; anything closer
        # than the camera offset is clamped to 0.
        rescaled_distance = np.maximum(0, (distance-camera_offset)/(far_clip-camera_offset))
        imfile = image_dir + obj_name + '-' + str(i) + '.png'
        Image.fromarray((255.0*rescaled_distance).astype('uint8')).save(imfile)
        # scipy.misc.toimage(depth, cmin=0.0, cmax=1.0).save(imfile)
    d.close()
def make_random_depths(obj_filename, param_filename, n, im_size=(40,40)):
    """
    Creates a dataset of depth maps and corresponding success probabilities
    at random interpolated gripper configurations.

    :param obj_filename: Path to the object's .obj mesh file
    :param param_filename: Path to the CSV of gripper configurations and labels
    :param n: Number of random configurations to sample
    :param im_size: (width, height) of the rendered depth maps in pixels
    :return: (depths, probs) arrays for the n sampled configurations
    """
    verts, faces = loadOBJ(obj_filename)
    verts = np.array(verts)
    # Raise the mesh so the bottom of the object sits at z = 0.2.
    minz = np.min(verts, axis=0)[2]
    verts[:,2] = verts[:,2] + 0.2 - minz

    points, labels = get_points(param_filename)

    d = Display(imsize=im_size)
    probs = []
    depths = []
    for i in range(n):
        point = get_interpolated_point(points)
        # Kernel-smoothed success probability at the interpolated configuration.
        estimate, confidence = get_prob_label(points, labels, point, sigma_p=2*.001, sigma_a=2*(4*np.pi/180))
        probs.append(estimate)
        gripper_pos = point[:3]
        gripper_orient = point[3:]
        d.set_camera_position(gripper_pos, gripper_orient, .3)
        d.set_mesh(verts, faces) #this must go after set_camera_position
        depth = d.read_depth()
        depths.append(depth)
    d.close()
    return np.array(depths), np.array(probs)
def get_interpolated_point(points):
    """
    Return a random configuration interpolated between a randomly chosen point
    and a stochastically selected nearby neighbour.
    """
    anchor = np.random.randint(0, len(points))
    neighbour = get_closest_index(points, anchor, prob_include=.5)
    w = np.random.rand()
    return points[anchor] * w + points[neighbour] * (1 - w)
def get_closest_index(points, index, prob_include=1):
    """
    Return the index of the point closest to points[index] (excluding itself).

    Each candidate is only considered with probability prob_include, which adds
    randomness to the choice. Bug fix: if the random filtering excluded every
    candidate (possible when prob_include < 1), the original returned an empty
    list, which silently broke callers that index with the result; now the
    overall nearest neighbour is returned as a fallback.

    :param points: Array of points, one per row
    :param index: Row index of the reference point
    :param prob_include: Probability that each improving candidate is accepted
    :return: Index of the (stochastically) closest other point
    """
    min_distance = np.inf           # replaces the arbitrary 1000 sentinel
    closest_index = None
    fallback_distance = np.inf
    fallback_index = None
    for i in range(len(points)):
        if i == index:
            continue
        distance = np.linalg.norm(points[i] - points[index])
        # Deterministic nearest neighbour, kept as a fallback.
        if distance < fallback_distance:
            fallback_distance = distance
            fallback_index = i
        # Stochastic acceptance, matching the original sampling behaviour
        # (the random draw only happens for improving candidates).
        if distance < min_distance and np.random.rand() < prob_include:
            min_distance = distance
            closest_index = i
    return closest_index if closest_index is not None else fallback_index
def get_points(param_filename):
    """
    Read gripper configurations and success labels from a CSV parameter file.

    Each line holds six configuration values (position x, y, z then three
    orientation angles) followed by an integer success label.

    :param param_filename: Path to the CSV parameter file
    :return: (points, labels) where points is an Nx6 array and labels a list of ints
    """
    points = []
    labels = []
    # 'with' closes the file handle (the original iterated an unclosed open()).
    with open(param_filename, 'r') as param_file:
        for line in param_file:
            vals = line.split(',')
            points.append([float(vals[0]), float(vals[1]), float(vals[2]),
                           float(vals[3]), float(vals[4]), float(vals[5])])
            labels.append(int(vals[6]))
    return np.array(points), labels
def get_prob_label(points, labels, point, sigma_p=.01, sigma_a=(4*np.pi/180)):
    """
    Gaussian kernel smoothing of success/failure to estimate success probability.

    Each sample is weighted by a Gaussian kernel centred on `point`, with one
    bandwidth for the three position dimensions (sigma_p) and another for the
    three orientation dimensions (sigma_a).

    :param points: Nx6 array of gripper configurations (position, then angles)
    :param labels: Sequence of N success labels (0 or 1)
    :param point: Query configuration (length 6)
    :param sigma_p: Kernel bandwidth for the position dimensions
    :param sigma_a: Kernel bandwidth for the orientation dimensions
    :return: (estimate, confidence) -- the weighted success probability, and the
        sum of kernel weights (how much data supports the estimate)
    """
    # The kernel matrix is diagonal, so the per-sample quadratic form
    # d . Sigma^-1 . d reduces to a weighted sum of squared differences; this
    # vectorized form replaces the original per-row matrix-product loop.
    sigma_inv_diag = np.array([sigma_p, sigma_p, sigma_p,
                               sigma_a, sigma_a, sigma_a]) ** -1
    differences = points - point
    weights = np.exp(-0.5 * np.sum(differences**2 * sigma_inv_diag, axis=1))
    estimate = np.sum(weights * np.array(labels).astype(float)) / np.sum(weights)
    confidence = np.sum(weights)
    # print(confidence)
    return estimate, confidence
# def make_random_bowl_depths():
# shapes = ['24_bowl-02-Mar-2016-07-03-29',
# '24_bowl-03-Mar-2016-22-54-50',
# '24_bowl-05-Mar-2016-13-53-41',
# '24_bowl-07-Mar-2016-05-06-04',
# '24_bowl-16-Feb-2016-10-12-27',
# '24_bowl-17-Feb-2016-22-00-34',
# '24_bowl-24-Feb-2016-17-38-53',
# '24_bowl-26-Feb-2016-08-35-29',
# '24_bowl-27-Feb-2016-23-52-43',
# '24_bowl-29-Feb-2016-15-01-53']
#
# # shapes = ['24_bowl-02-Mar-2016-07-03-29']
#
# n = 10000
# for shape in shapes:
# depths, labels = make_random_depths('../data/obj_files/' + shape + '.obj',
# '../data/params/' + shape + '.csv',
# n, im_size=(40,40))
#
# f = file('../data/' + shape + '-random.pkl', 'wb')
# cPickle.dump((depths, labels), f)
# f.close()
# def check_random_bowl_depths():
#
# # shapes = ['24_bowl-02-Mar-2016-07-03-29',
# # '24_bowl-03-Mar-2016-22-54-50',
# # '24_bowl-05-Mar-2016-13-53-41',
# # '24_bowl-07-Mar-2016-05-06-04',
# # '24_bowl-16-Feb-2016-10-12-27',
# # '24_bowl-17-Feb-2016-22-00-34',
# # '24_bowl-24-Feb-2016-17-38-53',
# # '24_bowl-26-Feb-2016-08-35-29',
# # '24_bowl-27-Feb-2016-23-52-43',
# # '24_bowl-29-Feb-2016-15-01-53']
#
# shapes = ['24_bowl-02-Mar-2016-07-03-29']
#
# f = file('../data/' + shapes[0] + '-random.pkl', 'rb')
# (depths, labels) = cPickle.load(f)
# f.close()
#
# print(labels)
#
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import axes3d, Axes3D
#
# depths = depths.astype(float)
# depths[depths > np.max(depths.flatten()) - 1] = np.NaN
#
# X = np.arange(0, depths.shape[1])
# Y = np.arange(0, depths.shape[2])
# X, Y = np.meshgrid(X, Y)
# fig = plt.figure(figsize=(12,6))
# ax1 = fig.add_subplot(1, 2, 1, projection='3d')
# plt.xlabel('x')
# ax2 = fig.add_subplot(1, 2, 2, projection='3d')
# plt.xlabel('x')
# # ax = Axes3D(fig)
# # for i in range(depths.shape[0]):
# s = 5 #pixel stride
# for i in range(n):
# if labels[i] > .5:
# color = 'g'
# ax = ax1
# ax.plot_wireframe(X[::s,::s], Y[::s,::s], depths[i,::s,::s], color=color)
# else:
# color = 'r'
# ax = ax2
# if np.random.rand(1) < .5:
# ax.plot_wireframe(X[::s,::s], Y[::s,::s], depths[i,::s,::s], color=color)
# # plt.title(str(i) + ': ' + str(labels[i]))
# plt.show()
def plot_box_corners():
    """
    Scatter-plot the vertices of the support-box mesh in 3D, as a quick sanity
    check of the box geometry.
    """
    verts, faces = loadOBJ('../data/support-box.obj')
    verts = np.array(verts)
    print(verts.shape)

    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import axes3d, Axes3D
    fig = plt.figure()
    ax = fig.add_subplot(1, 2, 1, projection='3d')
    ax.scatter(verts[:,0], verts[:,1], verts[:,2])
    plt.show()
# def save_bowl_and_box_depths():
# shapes = [
# '24_bowl-16-Feb-2016-10-12-27',
# '24_bowl-17-Feb-2016-22-00-34',
# '24_bowl-24-Feb-2016-17-38-53',
# '24_bowl-26-Feb-2016-08-35-29',
# '24_bowl-27-Feb-2016-23-52-43',
# '24_bowl-29-Feb-2016-15-01-53']
#
# import time
# for shape in shapes:
# print('Processing ' + shape)
# start_time = time.time()
# depths, labels = make_depth_from_gripper('../data/obj_files/' + shape + '.obj',
# '../data/params/' + shape + '.csv',
# bottom=0.2)
# box_depths, _ = make_depth_from_gripper('../data/support-box.obj',
# '../data/params/' + shape + '.csv',
# bottom=0)
# f = file('../data/depths/' + shape + '.pkl', 'wb')
# cPickle.dump((depths, box_depths, labels), f)
# f.close()
# print(' ' + str(time.time() - start_time) + 's')
def check_bowl_and_box_variance():
    """
    Plot the per-image standard deviation of the object and support-box depth
    maps for one bowl shape, as a sanity check of the pickled depth data.
    """
    # open() replaces the Python-2-only file() builtin; 'with' closes the handle.
    with open('../data/depths/' + '24_bowl-16-Feb-2016-10-12-27' + '.pkl', 'rb') as f:
        depths, box_depths, labels = cPickle.load(f)

    # print(labels)
    # print(depths.shape)
    # print(box_depths.shape)

    obj_sd = []
    box_sd = []
    for i in range(len(labels)):
        obj_sd.append(np.std(depths[i,:,:].flatten()))
        box_sd.append(np.std(box_depths[i,:,:].flatten()))

    import matplotlib.pyplot as plt
    plt.plot(obj_sd)
    plt.plot(box_sd)
    plt.show()
def plot_bowl_and_box_distance_example():
    """
    Plot one example object depth map together with the corresponding
    support-box depth map as overlaid 3D wireframes.
    """
    # open() replaces the Python-2-only file() builtin; 'with' closes the handle.
    with open('../data/depths/' + '24_bowl-16-Feb-2016-10-12-27' + '.pkl', 'rb') as f:
        depths, box_depths, labels = cPickle.load(f)

    from depthmap import get_distance
    distances = get_distance(depths, .2, 1.0)
    box_distances = get_distance(box_depths, .2, 1.0)

    # Mask pixels at (or near) the far plane so they are not drawn.
    distances[distances > .99] = None
    box_distances[box_distances > .99] = None

    from mpl_toolkits.mplot3d import axes3d, Axes3D
    X = np.arange(0, depths.shape[1])
    Y = np.arange(0, depths.shape[2])
    X, Y = np.meshgrid(X, Y)
    # NOTE(review): plt is assumed to be imported at module level -- confirm.
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection='3d')
    ax.plot_wireframe(X, Y, distances[205,:,:], color='b')
    ax.plot_wireframe(X, Y, box_distances[205,:,:], color='r')
    plt.show()
def load_depth_image(directory, object, index):
    """
    Load a single saved depth image as a greyscale array.

    :param directory: Directory containing the image files
    :param object: Object-name part of the file name (note: shadows the builtin)
    :param index: Perspective-index part of the file name
    :return: The image as a 2D greyscale array
    """
    # NOTE(review): make_depth_images saves files as name + '-' + index + '.png';
    # there is no '-' here -- confirm the intended naming convention.
    imfile = directory + object + str(index) + '.png'
    image = scipy.ndimage.imread(imfile, flatten=True)
    # Bug fix: the original read the image but never returned it.
    return image
def get_pos_rot(objects, gripper_pos, gripper_orient, obj):
    """
    Collect the gripper positions and rotation matrices belonging to a single
    object, given parallel parameter lists covering all objects.

    :param objects: Object name for each grasp
    :param gripper_pos: Gripper position for each grasp (parallel to objects)
    :param gripper_orient: Gripper Euler angles for each grasp (parallel to objects)
    :param obj: Name of the object to select
    :return: (positions, rotation_matrices) as numpy arrays
    """
    pos = []
    rot = []
    for name, p, orient in zip(objects, gripper_pos, gripper_orient):
        if name == obj:
            pos.append(p)
            rot.append(rot_matrix(orient[0], orient[1], orient[2]))
    return np.array(pos), np.array(rot)
def process_directory(obj_dir, image_dir, support=False):
    """
    Render depth images for every .obj file in a directory, using the grasp
    perspectives recorded in the shared output_data.csv parameter file.

    :param obj_dir: Directory containing .obj mesh files
    :param image_dir: Directory in which to store rendered images
    :param support: If True, render the support box (resting on the ground,
        bottom=0) instead of the objects themselves
    """
    from os import listdir
    from os.path import isfile, join
    import time

    objects, gripper_pos, gripper_orient, labels = load_all_params('../../grasp-conv/data/output_data.csv')

    # The support box sits on the ground; objects are raised to 0.2.
    bottom = 0 if support else 0.2
    for f in listdir(obj_dir):
        obj_filename = join(obj_dir, f)
        if isfile(obj_filename) and f.endswith('.obj'):
            print('Processing ' + f)
            start_time = time.time()
            # f[:-4] strips the '.obj' extension to get the object name.
            pos, rot = get_pos_rot(objects, gripper_pos, gripper_orient, f)
            make_depth_images(f[:-4], pos, rot, obj_dir, image_dir, bottom=bottom, support=support)
            print(' ' + str(time.time()-start_time) + 's')
def calculate_grasp_metrics_for_directory(image_dir, im_width=80,
                                          camera_offset=.45, near_clip=.25, far_clip=.8):
    """
    Compute heuristic grip metrics (finger-path intersections and grip
    qualities) for every depth image in a directory.

    :param image_dir: Directory of depth images as written by make_depth_images
    :param im_width: Image width in pixels (images are assumed square)
    :param camera_offset: Distance from camera to gripper, as used at render time
    :param near_clip: Near clip distance used at render time
    :param far_clip: Far clip distance used at render time
    :return: (all_intersections, all_qualities, all_files), parallel lists with
        one entry per processed image
    """
    from os import listdir
    from os.path import isfile, join
    import time
    from heuristic import finger_path_template
    from heuristic import calculate_grip_metrics

    template = finger_path_template(45.*np.pi/180., im_width, camera_offset)

    all_intersections = []
    all_qualities = []
    all_files = []
    for f in listdir(image_dir):
        image_filename = join(image_dir, f)
        if isfile(image_filename) and f.endswith('.png'):
            # print('Processing ' + image_filename)
            image = scipy.misc.imread(image_filename)
            # Undo the scaling applied when the image was saved, recovering
            # metric distances from the camera.
            rescaled_distance = image / 255.0
            distance = rescaled_distance*(far_clip-camera_offset)+camera_offset

            # from mpl_toolkits.mplot3d import axes3d, Axes3D
            # X = np.arange(0, im_width)
            # Y = np.arange(0, im_width)
            # X, Y = np.meshgrid(X, Y)
            # fig = plt.figure()
            # distance[distance > camera_offset + .3] = None
            # template[template < camera_offset] = None
            # ax = fig.add_subplot(1,1,1,projection='3d')
            # ax.plot_wireframe(X, Y, distance)
            # ax.plot_wireframe(X, Y, template, color='r')
            # ax.set_xlabel('x')
            # plt.show()

            intersections, qualities = calculate_grip_metrics(distance, template)
            # print(intersections)
            # print(qualities)
            all_intersections.append(intersections)
            all_qualities.append(qualities)
            all_files.append(f)
    return all_intersections, all_qualities, all_files
def calculate_grasp_metric_maps_for_directory(image_dir, dest_dir, im_width=80,
                                              camera_offset=.45, near_clip=.25, far_clip=.8):
    """
    Compute a grasp-metric map for every depth image in a directory and save
    each map as a greyscale image in dest_dir.

    :param image_dir: Directory of depth images as written by make_depth_images
    :param dest_dir: Directory in which to store the metric-map images
    :param im_width: Image width in pixels (images are assumed square)
    :param camera_offset: Distance from camera to gripper, as used at render time
    :param near_clip: Near clip distance used at render time
    :param far_clip: Far clip distance used at render time
    """
    from os import listdir
    from os.path import isfile, join
    from heuristic import finger_path_template
    # Bug fix: calculate_metric_map was used below but never imported, which
    # raised a NameError on the first processed image. It is assumed to live in
    # the heuristic module alongside finger_path_template -- confirm.
    from heuristic import calculate_metric_map

    finger_path = finger_path_template(45.*np.pi/180., im_width, camera_offset)

    for f in listdir(image_dir):
        image_filename = join(image_dir, f)
        if isfile(image_filename) and f.endswith('.png'):
            print('Processing ' + image_filename)
            image = scipy.misc.imread(image_filename)
            # Recover metric distances from the saved 8-bit scaling.
            rescaled_distance = image / 255.0
            distance = rescaled_distance*(far_clip-camera_offset)+camera_offset
            mm = calculate_metric_map(distance, finger_path, 1)
            imfile = dest_dir + f[:-4] + '-map' + '.png'
            Image.fromarray((255.0*mm).astype('uint8')).save(imfile)
def calculate_overlap_for_directory(image_dir, dest_dir, im_width=80,
                                    camera_offset=.45, far_clip=.8):
    """
    For every depth image in a directory, compute how far the heuristic finger
    path penetrates the rendered surface (the overlap) and save the result as
    a greyscale image in dest_dir.

    :param image_dir: Directory of depth images as written by make_depth_images
    :param dest_dir: Directory in which to store the overlap images
    :param im_width: Image width in pixels (images are assumed square)
    :param camera_offset: Distance from camera to gripper, as used at render time
    :param far_clip: Far clip distance used at render time
    """
    from os import listdir
    from os.path import isfile, join
    from heuristic import finger_path_template

    finger_path = finger_path_template(45.*np.pi/180., im_width, camera_offset)

    c = 0
    for f in listdir(image_dir):
        image_filename = join(image_dir, f)
        if isfile(image_filename) and f.endswith('.png'):
            if c % 1000 == 0:
                # Progress indicator only; directories can hold many images.
                print('Processing ' + image_filename)
            c += 1
            image = scipy.misc.imread(image_filename)
            # Recover metric distances from the saved 8-bit scaling.
            rescaled_distance = image / 255.0
            distance = rescaled_distance*(far_clip-camera_offset)+camera_offset
            overlap = np.maximum(0, finger_path - distance)
            imfile = dest_dir + f[:-4] + '-overlap' + '.png'
            # we divide by 15.4cm because it's the max overlap due to gripper geometry
            Image.fromarray((255.0/.154*overlap).astype('uint8')).save(imfile)
def compress_images(directory, extension, name='zip'):
    """
    Zip all files with the given extension from a directory, in batches, so the
    data can be transferred to a server.

    Archives are written to the current working directory as name0.zip,
    name1.zip, ... with at most n_per_zip files each.

    Bug fixes relative to the original: if no file matched, the final close
    raised a NameError on an unbound ZipFile; and when the file count was an
    exact multiple of the batch size, an already-closed archive was closed
    again. Both cases are now guarded.

    :param directory: Directory to scan (non-recursively) for files
    :param extension: File-name suffix to include (e.g. '.png')
    :param name: Prefix for the generated zip file names
    """
    from os import listdir
    from os.path import isfile, join
    from zipfile import ZipFile

    n_per_zip = 50000

    zip_index = 0
    file_index = 0
    zf = None
    for f in listdir(directory):
        image_filename = join(directory, f)
        if isfile(image_filename) and f.endswith(extension):
            if file_index == 0:
                # Start a new batch archive.
                zf = ZipFile(name + str(zip_index) + '.zip', 'w')
            zf.write(image_filename)
            file_index += 1
            if file_index == n_per_zip:
                print('writing file ' + str(zip_index))
                file_index = 0
                zf.close()
                zip_index += 1
    # Close the last (partial) archive only if one is actually open.
    if zf is not None and file_index > 0:
        zf.close()
if __name__ == '__main__':
    # Ad-hoc driver: uncomment the processing step you want to run.
    # save_bowl_and_box_depths()
    # plot_bowl_and_box_distance_example()
    # compress_images('../../grasp-conv/data/support_depths/', '.png')
    # compress_images('../../grasp-conv/data/obj_depths/', '.png')
    # compress_images('../../grasp-conv/data/obj_mm/', '.png')
    # compress_images('../../grasp-conv/data/support_overlap/', '.png', name='support-overlap')
    # compress_images('../../grasp-conv/data/obj_overlap/', '.png', name='obj-overlap')
    compress_images('/Volumes/TrainingData/grasp-conv/data/eye-perspectives', '.png', name='eye-perspectives')

    # calculate_grasp_metric_maps_for_directory('../../grasp-conv/data/obj_depths/', '../../grasp-conv/data/obj_mm/')
    # image = scipy.misc.imread('../../grasp-conv/data/obj_mm/104_toaster_final-18-Dec-2015-13-56-59-0-map.png')
    # mm = image / 255.0
    # print(np.min(mm))
    # print(np.max(mm))

    # objects, gripper_pos, gripper_orient, labels = load_all_params('../../grasp-conv/data/output_data.csv')
    # obj_dir = '../../grasp-conv/data/obj_files/'
    # process_directory(obj_dir, '../../grasp-conv/data/obj_depths/')
    # # process_directory(obj_dir, '../../grasp-conv/data/support_depths/', support=True)
    #
    # # intersections, qualities, files = calculate_grasp_metrics_for_directory('../../grasp-conv/data/support_depths/')
    # intersections, qualities, files = calculate_grasp_metrics_for_directory('../../grasp-conv/data/obj_depths/')
    # f = file('../data/metrics-objects.pkl', 'wb')
    # cPickle.dump((intersections, qualities, files), f)
    # f.close()

    # calculate_overlap_for_directory('../../grasp-conv/data/obj_depths/', '../../grasp-conv/data/obj_overlap/')
    # calculate_overlap_for_directory('../../grasp-conv/data/support_depths/', '../../grasp-conv/data/support_overlap/')

    # f = file('metrics.pkl', 'rb')
    # intersections, qualities = cPickle.load(f)
    # f.close()
    # print(intersections)
    # print(qualities)
    # from mpl_toolkits.mplot3d import axes3d, Axes3D
    # X = np.arange(0, 80)
    # Y = np.arange(0, 80)
    # X, Y = np.meshgrid(X, Y)
    # fig = plt.figure()
    # ax = fig.add_subplot(1,1,1,projection='3d')
    # ax.plot_wireframe(X, Y, foo)
    # ax.set_xlabel('x')
    # plt.show()
| bptripp/grasp-convnet | py/data.py | Python | mit | 26,522 | [
"Gaussian"
] | 80d9647c2a8237a9cc8d0b6f897d942225f2352ad93efe7f3588beb3b5831f8c |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
# Short aliases for Cheetah's name-mapper lookup helpers, used throughout the
# generated respond() method.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Metadata recorded by the Cheetah compiler about this generated module.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.388763
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/mobile/movies.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'

# Refuse to run against an older, incompatible Cheetah runtime.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class movies(Template):
    """Cheetah-generated template class rendering the mobile movie-list page.

    Do not edit by hand: this module was generated by the Cheetah compiler from
    movies.tmpl; make changes in the template source and recompile. The values
    it renders (directory, bookmarks, movies, tstrings) are looked up
    dynamically in the template search list at respond() time.
    """

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        # Forward construction to Template, then apply only the keyword
        # arguments that Cheetah template instances understand.
        super(movies, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        # Renders the whole page into the transaction's response buffer and
        # returns the rendered text when a dummy (buffering) transaction is used.
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        write(u'''<html>\r
<head>\r
\t<title>OpenWebif</title>\r
\t<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\r
\t<meta name="viewport" content="user-scalable=no, width=device-width"/>\r
\t<meta name="apple-mobile-web-app-capable" content="yes" />\r
\t<link rel="stylesheet" type="text/css" href="/css/jquery.mobile-1.0.min.css" media="screen"/>\r
\t<link rel="stylesheet" type="text/css" href="/css/iphone.css" media="screen"/>\r
\t<script src="/js/jquery-1.6.2.min.js"></script>\r
\t<script src="/js/jquery.mobile-1.0.min.js"></script>\r
</head>\r
<body> \r
\t<div data-role="page">\r
\r
\t\t<div id="header">\r
\t\t\t<div class="button" onClick="history.back()">''')
        _v = VFFSL(SL,"tstrings",True)['back'] # u"$tstrings['back']" on line 17, col 49
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['back']")) # from line 17, col 49.
        write(u'''</div>\r
\t\t\t<h1><a style="color:#FFF;text-decoration:none;" href=\'/mobile\'>OpenWebif</a></h1>
\t\t\t<h2>''')
        _v = VFFSL(SL,"tstrings",True)['movies'] # u"$tstrings['movies']" on line 19, col 8
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['movies']")) # from line 19, col 8.
        write(u'''</h2>\r
\t\t</div>\r
\r
\t\t<div data-role="fieldcontain">\r
\t\t <select name="select-choice-1" id="select-choice-moviedir" onChange="window.location.href=\'/mobile/movies?dirname=\'+escape(options[selectedIndex].value);">\r
\t\t\t <option value="''')
        _v = VFFSL(SL,"directory",True) # u'$directory' on line 24, col 21
        if _v is not None: write(_filter(_v, rawExpr=u'$directory')) # from line 24, col 21.
        write(u'''">''')
        _v = VFFSL(SL,"directory",True) # u'$directory' on line 24, col 33
        if _v is not None: write(_filter(_v, rawExpr=u'$directory')) # from line 24, col 33.
        write(u'''</option>\r
''')
        # One <option> per bookmarked movie directory.
        for bookmark in VFFSL(SL,"bookmarks",True): # generated from line 25, col 6
            write(u'''\t\t\t <option value="''')
            _v = VFFSL(SL,"bookmark",True) # u'$bookmark' on line 26, col 21
            if _v is not None: write(_filter(_v, rawExpr=u'$bookmark')) # from line 26, col 21.
            write(u'''">''')
            _v = VFFSL(SL,"bookmark",True) # u'$bookmark' on line 26, col 32
            if _v is not None: write(_filter(_v, rawExpr=u'$bookmark')) # from line 26, col 32.
            write(u'''</option>\r
''')
        write(u'''\t\t </select>\r
\t\t</div>\r
\r
\t\t<div id="contentContainer">\r
\t\t\t<ul data-role="listview" data-inset="true" data-theme="d">\r
\t\t\t\t<li data-role="list-divider" role="heading" data-theme="b">''')
        _v = VFFSL(SL,"tstrings",True)['movies'] # u"$tstrings['movies']" on line 33, col 64
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['movies']")) # from line 33, col 64.
        write(u'''</li>\r
''')
        # One list item per movie; prefer the EPG event name, fall back to the
        # file name when no event name was recorded.
        for movie in VFFSL(SL,"movies",True): # generated from line 34, col 5
            if VFFSL(SL,"movie.eventname",True) != "": # generated from line 35, col 5
                write(u'''\t\t\t\t<li>''')
                _v = VFFSL(SL,"movie.eventname",True) # u'$movie.eventname' on line 36, col 9
                if _v is not None: write(_filter(_v, rawExpr=u'$movie.eventname')) # from line 36, col 9.
                write(u'''</li>\r
''')
            else: # generated from line 37, col 5
                write(u'''\t\t\t\t<li>''')
                _v = VFFSL(SL,"movie.filename",True) # u'$movie.filename' on line 38, col 9
                if _v is not None: write(_filter(_v, rawExpr=u'$movie.filename')) # from line 38, col 9.
                write(u'''</li>\r
''')
        write(u'''\t\t\t</ul>\r
\t\t</div>\r
\r
\t\t<div id="footer">\r
\t\t\t<p>OpenWebif Mobile</p>\r
\t\t\t<a onclick="document.location.href=\'/index?mode=fullpage\';return false;" href="#">''')
        _v = VFFSL(SL,"tstrings",True)['show_full_openwebif'] # u"$tstrings['show_full_openwebif']" on line 46, col 86
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['show_full_openwebif']")) # from line 46, col 86.
        write(u'''</a>\r
\t\t</div>\r
\t\t\r
\t</div>\r
</body>\r
</html>\r
''')

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False

    _CHEETAH_version = __CHEETAH_version__

    _CHEETAH_versionTuple = __CHEETAH_versionTuple__

    _CHEETAH_genTime = __CHEETAH_genTime__

    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__

    _CHEETAH_src = __CHEETAH_src__

    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__

    _mainCheetahMethod_for_movies= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing methods to the generated class if they have
# not been added already (standard boilerplate emitted by the Cheetah compiler).
if not hasattr(movies, '_initCheetahAttributes'):
    templateAPIClass = getattr(movies, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(movies)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
# Allow the compiled template to be rendered directly from the command line.
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=movies()).run()
| pli3/e2-openwbif | plugin/controllers/views/mobile/movies.py | Python | gpl-2.0 | 8,410 | [
"VisIt"
] | dbc9515ecf63a2eff8861b5e72094313a171a6c147586766bff8d24d5ebe7be3 |
#!/usr/bin/env python3.4
__author__ = 'mdc_hk'
version = '1.0'
import logging, os, re, sys
import pandas as pd
from pandas import Series
from bs4 import BeautifulSoup
import numpy as np
from scipy import stats
logging.basicConfig(filename='Log.txt', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
#===========================================================================================================
# Parse command-line arguments; on a missing argument, print usage and let the
# script fall through without running the analysis.
try:
    filein = sys.argv[1]
    xmlResequencingRunStatistics = sys.argv[2]
    reference_annotated_fa = sys.argv[3]
    confidenceValue = sys.argv[4]
    proceed = True
except IndexError:
    # Only a missing argument should trigger the usage message; the original
    # bare 'except:' also swallowed unrelated errors such as KeyboardInterrupt.
    print('\nProgram: varValidity\nVersion: {}\n'.format(version) + 'Usage: ' + sys.argv[0] + ' <input_txt_file> '
          '<ResequencingRunStatistics_xml> <reference_annotated_fa> <Confidence value>')
    print('Examples:', sys.argv[0], 'sample001_table.txt', 'ResequencingRunStatistics.xml', 'reference_annotated.fa',
          '0.9500')
    proceed = False
# The main analysis runs only when all command-line arguments were supplied.
if proceed == True:
    programFolder = os.path.expanduser('~/FluSeq/')

    # Variant table produced upstream; the 8th column name encodes the sample ID.
    dataFromTable = pd.read_table(filein, sep='\t')
    columnID = dataFromTable.columns[7]

    try:
        # System-control database of MS2 read statistics, indexed by gene
        # segment and amplicon position; required for the contamination model.
        # dfStat = pd.read_csv('ms2SysCtrl.db', sep=',')
        dfStat = pd.read_csv(programFolder + 'ms2SysCtrl.db', sep=',')
        dfStat = dfStat.set_index(['GeneSegment', 'POS'])
    except IOError:
        errorFile = open('Error.txt', 'a')
        errorFile.write('Please make sure the MS2SysCtrl.db is placed in the correct PATH' + '\n')
        errorFile.close()
        exit(1)
# The statistics for contamination starts here.
def expectedConR(x2, gene, pos):
    """
    Estimate the upper prediction limit for contaminating reads at an amplicon
    position, by fitting a linear model of total depth vs. PF reads over the
    system-control database and taking a prediction interval at the globally
    configured confidence level.

    Uses the globals dfStat (control database, indexed by gene segment and
    position) and confidenceValue (string, e.g. '0.9500').

    :param x2: PF reads (in millions) of the sample being evaluated
    :param gene: Gene segment used to index the control database
    :param pos: Amplicon position used to index the control database
    :return: Upper prediction-limit read count for contamination
    """
    dfStat_tmp = dfStat.ix[gene, pos]  # NOTE(review): .ix is removed in modern pandas -- .loc[(gene, pos)] is the successor
    x = dfStat_tmp['PFreads_inMillion']
    y = dfStat_tmp['Total_Depth']
    # Modeling with Numpy
    p, cov = np.polyfit(x,y,1,cov=True)         # parameters and covariance of the linear fit
    y_model = np.polyval(p, x)                  # depth predicted by the fit at the control x values
    # Statistics
    n = y.size                                  # number of observations
    m = p.size                                  # number of parameters
    DF = n - m                                  # degrees of freedom
    t = stats.t.ppf(float(confidenceValue), n - m)  # t statistic used for CI and PI bands
    # Estimates of Error in Data/Model
    resid = y - y_model
    s_err = np.sqrt(np.sum(resid**2)/(DF))      # standard deviation of the error
    y2 = np.polyval(p, x2)
    # Prediction Interval half-width at x2
    PI = t*s_err*np.sqrt(1+1/n+(x2-np.mean(x))**2/np.sum((x-np.mean(x))**2))
    uppConfLimit_ContRead = y2 + PI
    return uppConfLimit_ContRead
### To import the main table result
dataFromTable['SAMPLE'] = columnID[:-3]
dataFromTable = dataFromTable.set_index(['SAMPLE'])

### Parse the run statistics XML to get PF read counts per sample.
try:
    bsObj = BeautifulSoup(open(xmlResequencingRunStatistics), 'lxml-xml')
    # The tag is spelled both 'Statistics' and 'Statisics' in different runs,
    # hence the optional 't' in the pattern.
    xmlOfSamples = bsObj.findAll(re.compile(r'SummarizedSampleStatis(t)*ics'))
except IOError:
    errorFile = open('Error.txt', 'a')
    errorFile.write('Please make sure the run-specific ResequencingRunStatistics.xml is placed in the run folder'
                    + '\n')
    errorFile.close()
    exit(1)

listOfSamples = []
for name in xmlOfSamples:
    listOfSamples.append(name.SampleName.get_text())
listOfPFReads = []
for name in xmlOfSamples:
    # Clusters passing filter, converted to millions and rounded to 2 dp.
    listOfPFReads.append(round(int(name.NumberOfClustersPF.get_text())/1000000, 2))
if len(listOfSamples) == len(listOfPFReads):
    dictOfSamplesAndPFreads = dict(zip(listOfSamples, listOfPFReads))
    # Attach this sample's PF read count (in millions) to every variant row.
    dataFromTable['PFreads_inMillion'] = dictOfSamplesAndPFreads[columnID[:-3]]

### to set the index as 'CHROM' for easy gene data separation
dataFromTable = dataFromTable.set_index(['CHROM'], drop=False)
geneList = []
def validityTester(gene):
    """
    Classify each variant row of `gene` by comparing the read counts supporting
    the reference and up to two alternate alleles against the contamination
    upper prediction limit, then record the resulting call and allele fraction.

    Adds the columns 'Call(R1,R2)' and 'Percentage(R2/R1+R2)' to `gene` in place.

    :param gene: DataFrame with columns refCopy, alt1Copy, alt2Copy,
        UppPredLimit_ContRead, REF and ALT (ALT holds 'alt1' or 'alt1,alt2')
    """
    # Each supported allele contributes a bit to a 3-bit score:
    # 1 = reference supported, 2 = first alternate, 4 = second alternate.
    validityOfWildtype = []
    for index, row in gene.iterrows():
        if row['refCopy'] > row['UppPredLimit_ContRead']:
            validityOfWildtype.append(1)
        else:
            validityOfWildtype.append(0)
    validityOfAlt1 = []
    for index, row in gene.iterrows():
        if row['alt1Copy'] > row['UppPredLimit_ContRead']:
            validityOfAlt1.append(2)
        else:
            validityOfAlt1.append(0)
    validityOfAlt2 = []
    for index, row in gene.iterrows():
        if row['alt2Copy'] > row['UppPredLimit_ContRead']:
            validityOfAlt2.append(4)
        else:
            validityOfAlt2.append(0)
    scoring = [x + y + z for x, y, z in zip(validityOfWildtype, validityOfAlt1, validityOfAlt2)]

    Ref = list(gene['REF'])
    # Split 'alt1,alt2' into two columns. expand=True replaces the older
    # .apply(Series, 1) idiom and also works on current pandas.
    Alt12 = gene['ALT'].str.split(',', expand=True)
    if len(Alt12.columns) == 1:
        Alt12[1] = ''
    Alt1 = list(Alt12[0])
    Alt2 = list(Alt12[1])
    refCopyNumber = list(gene['refCopy'])
    alt1CopyNumber = list(gene['alt1Copy'])
    alt2CopyNumber = list(gene['alt2Copy'])

    call = []
    percentage = []
    for i in range(len(scoring)):
        if scoring[i] == 0:
            call.append('Invalid')
            percentage.append(np.nan)  # np.nan: np.NaN was removed in NumPy 2.0
        elif scoring[i] == 1:    # reference only
            call.append(Ref[i])
            percentage.append(1.00)
        elif scoring[i] == 2:    # alt1 only
            call.append(Alt1[i])
            percentage.append(1.00)
        elif scoring[i] == 3:    # reference + alt1
            call.append(Ref[i]+','+Alt1[i])
            percentage.append(alt1CopyNumber[i]/(alt1CopyNumber[i]+refCopyNumber[i]))
        elif scoring[i] == 4:    # alt2 only
            call.append(Alt2[i])
            percentage.append(1.00)
        elif scoring[i] == 5:    # reference + alt2
            call.append(Ref[i]+','+Alt2[i])
            # Bug fix: the original divided the whole list alt2CopyNumber by an
            # int (TypeError) instead of using the element alt2CopyNumber[i].
            percentage.append(alt2CopyNumber[i]/(alt2CopyNumber[i]+refCopyNumber[i]))
        elif scoring[i] == 6:    # alt1 + alt2
            call.append(Alt1[i]+','+Alt2[i])
            percentage.append(alt2CopyNumber[i]/(alt1CopyNumber[i]+alt2CopyNumber[i]))
        elif scoring[i] == 7:    # all three alleles
            call.append(Ref[i]+','+Alt1[i]+','+Alt2[i])
            # NOTE(review): this fraction ignores refCopy -- confirm intended.
            percentage.append(alt2CopyNumber[i]/(alt1CopyNumber[i]+alt2CopyNumber[i]))
    gene['Call(R1,R2)'] = call
    gene['Percentage(R2/R1+R2)'] = [ '%.3f' % elem for elem in percentage ]
    return
def geneAnalysis(Chrom, Gene):
    """Analyse one gene segment and append the annotated frame to geneList.

    Selects the rows of the module-level ``dataFromTable`` indexed by
    ``Chrom``, computes a per-position upper prediction limit for
    contaminating reads via ``expectedConR``, splits the sample's allele
    depth (``<sample>.AD``) field into ref/alt1/alt2 copy numbers, runs
    ``validityTester`` on the result, and appends the frame to the
    module-level ``geneList``.

    Parameters
    ----------
    Chrom : str
        CHROM index value identifying the segment's rows in dataFromTable.
    Gene : str
        Segment label (e.g. "Segment1") forwarded to expectedConR.
    """
    # (upper POS bound, read length, fallback read length).  The first bin
    # whose bound is >= POS selects the read-length argument passed to
    # expectedConR(); when a fallback is given, it is retried if the primary
    # lookup raises (mirrors the original try/except ladder).
    pos_bins = [
        (225, 151, None),
        (450, 301, None),
        (525, 451, None),
        (675, 601, None),
        (825, 751, None),
        (975, 901, 751),
        (1125, 1051, 901),
        (1275, 1201, 1051),
        (1425, 1351, 1201),
        (1575, 1501, 1351),
        (1725, 1651, 1501),
        (1801, 1801, 1651),
        (1951, 1951, 1801),
        (2101, 2101, 1951),
        (float('inf'), 2251, 2101),
    ]
    # .loc replaces the pandas .ix indexer, which was removed in pandas 1.0.
    GeneData = dataFromTable.loc[Chrom]
    GeneData = GeneData.reset_index(drop=True)
    UppPredLimit_ContRead = []
    for pos, pf_reads in zip(GeneData['POS'], GeneData['PFreads_inMillion']):
        for upper, read_len, fallback in pos_bins:
            if pos <= upper:
                if fallback is None:
                    UppPredLimit_ContRead.append(expectedConR(pf_reads, Gene, read_len))
                else:
                    try:
                        UppPredLimit_ContRead.append(expectedConR(pf_reads, Gene, read_len))
                    except Exception:
                        UppPredLimit_ContRead.append(expectedConR(pf_reads, Gene, fallback))
                break
    GeneData['UppPredLimit_ContRead'] = UppPredLimit_ContRead
    # Split the "ref,alt1[,alt2]" allele-depth string into three columns;
    # expand=True replaces the removed .apply(Series, 1) idiom.
    GeneData_tojoin = GeneData[(columnID[:-3] + '.AD')].str.split(",", expand=True)
    if len(GeneData_tojoin.columns) == 2:
        GeneData_tojoin[2] = np.nan
    GeneData_tojoin.columns = ['refCopy', 'alt1Copy', 'alt2Copy']
    GeneData = pd.merge(GeneData, GeneData_tojoin, left_index=True, right_index=True)
    GeneData.refCopy = GeneData.refCopy.astype(float)
    GeneData.alt1Copy = GeneData.alt1Copy.astype(float)
    GeneData.alt2Copy = GeneData.alt2Copy.astype(float)
    GeneData = GeneData.set_index('POS')
    validityTester(GeneData)
    geneList.append(GeneData)
    return
# Read the annotated reference FASTA and run geneAnalysis on every segment
# header found in it, then write one per-sample Excel workbook.
with open(reference_annotated_fa,'r') as refFile:
    sequences = refFile.read()
# Group 0 is the full "SegmentN_..." header token (used as the CHROM key),
# group 1 is the bare "SegmentN" label.
segmentRegex = re.compile(r'((Segment\d)_[\S]*)')
segment = segmentRegex.findall(sequences)
for seg in segment:
    try:
        geneAnalysis(seg[0], seg[1])
    except:
        # Best-effort: log the failed segment and continue with the next one.
        logging.info(seg[1] + ' of ' + columnID[:-3] + ' was not analysed')
        errorFile = open('Error.txt', 'a')
        errorFile.write(seg[1] + ' of ' + columnID[:-3] + ' was not analysed. Suggested solution: Something wrong '
        'with the formating of the AllGenes.xls file' + '\n')
        errorFile.close()
try:
    # Concatenate all per-segment frames for this sample into one workbook.
    pd.concat(geneList, axis=0).to_excel(columnID[:-3] + "_AllGenes" + ".xls")
except ValueError:
    # pd.concat raises ValueError when geneList is empty (no segment analysed).
    print('All segments of sample ' + columnID[:-3] + ' were not analysed')
    logging.info('All segments of sample ' + columnID[:-3] + ' were not analysed. '
    'Suggested solution: please use bwa-0.6.2 but not the latest version')
    errorFile = open('Error.txt', 'a')
    errorFile.write('All gene segments of sample ' + columnID[:-3] + ' were not analysed. '
    'Suggested solution: please use bwa-0.6.2 but not the latest version' + '\n')
errorFile.close() | hkailee/FluSeq | varValidity.py | Python | mit | 12,778 | [
"BWA"
] | 50f5dee06a2040e7e7dc1cdc4c1865139776b3bac00531aeadfe6f13e87a2655 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.channel_v1.services.cloud_channel_service import (
CloudChannelServiceAsyncClient,
)
from google.cloud.channel_v1.services.cloud_channel_service import (
CloudChannelServiceClient,
)
from google.cloud.channel_v1.services.cloud_channel_service import pagers
from google.cloud.channel_v1.services.cloud_channel_service import transports
from google.cloud.channel_v1.types import channel_partner_links
from google.cloud.channel_v1.types import common
from google.cloud.channel_v1.types import customers
from google.cloud.channel_v1.types import entitlements
from google.cloud.channel_v1.types import offers
from google.cloud.channel_v1.types import operations
from google.cloud.channel_v1.types import products
from google.cloud.channel_v1.types import service
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import postal_address_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy mTLS client certificate source returning static cert/key bytes."""
    return (b"cert bytes", b"key bytes")
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a substitute endpoint when *client* defaults to localhost."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their mTLS
    variants and leaves non-Google and already-mTLS hosts untouched."""
    convert = CloudChannelServiceClient._get_default_mtls_endpoint
    assert convert(None) is None
    # Plain and sandbox endpoints gain the ".mtls." label.
    assert convert("example.googleapis.com") == "example.mtls.googleapis.com"
    assert (
        convert("example.sandbox.googleapis.com")
        == "example.mtls.sandbox.googleapis.com"
    )
    # Already-mTLS endpoints are returned unchanged (idempotent).
    assert convert("example.mtls.googleapis.com") == "example.mtls.googleapis.com"
    assert (
        convert("example.mtls.sandbox.googleapis.com")
        == "example.mtls.sandbox.googleapis.com"
    )
    # Non-Google hosts pass through untouched.
    assert convert("api.example.com") == "api.example.com"
@pytest.mark.parametrize(
    "client_class", [CloudChannelServiceClient, CloudChannelServiceAsyncClient,]
)
def test_cloud_channel_service_client_from_service_account_info(client_class):
    """Clients built via from_service_account_info use the parsed credentials
    and the default service host."""
    fake_creds = ga_credentials.AnonymousCredentials()
    info = {"valid": True}
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = fake_creds
        client = client_class.from_service_account_info(info)
    assert isinstance(client, client_class)
    assert client.transport._credentials == fake_creds
    assert client.transport._host == "cloudchannel.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.CloudChannelServiceGrpcTransport, "grpc"),
        (transports.CloudChannelServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_cloud_channel_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """with_always_use_jwt_access is applied to service-account credentials
    exactly when always_use_jwt_access=True is requested."""
    for jwt_enabled in (True, False):
        with mock.patch.object(
            service_account.Credentials, "with_always_use_jwt_access", create=True
        ) as use_jwt:
            creds = service_account.Credentials(None, None, None)
            transport_class(credentials=creds, always_use_jwt_access=jwt_enabled)
            if jwt_enabled:
                use_jwt.assert_called_once_with(True)
            else:
                use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class", [CloudChannelServiceClient, CloudChannelServiceAsyncClient,]
)
def test_cloud_channel_service_client_from_service_account_file(client_class):
    """Both from_service_account_file and its _json alias produce clients
    carrying the file credentials and the default service host."""
    fake_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = fake_creds
        for constructor in (
            client_class.from_service_account_file,
            client_class.from_service_account_json,
        ):
            client = constructor("dummy/file/path.json")
            assert client.transport._credentials == fake_creds
            assert isinstance(client, client_class)
    assert client.transport._host == "cloudchannel.googleapis.com:443"
def test_cloud_channel_service_client_get_transport_class():
    """get_transport_class returns a known transport by default and the gRPC
    transport when asked for by name."""
    default_transport = CloudChannelServiceClient.get_transport_class()
    assert default_transport in [
        transports.CloudChannelServiceGrpcTransport,
    ]
    named_transport = CloudChannelServiceClient.get_transport_class("grpc")
    assert named_transport == transports.CloudChannelServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            CloudChannelServiceClient,
            transports.CloudChannelServiceGrpcTransport,
            "grpc",
        ),
        (
            CloudChannelServiceAsyncClient,
            transports.CloudChannelServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    CloudChannelServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CloudChannelServiceClient),
)
@mock.patch.object(
    CloudChannelServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CloudChannelServiceAsyncClient),
)
def test_cloud_channel_service_client_client_options(
    client_class, transport_class, transport_name
):
    """Verify how client_options, GOOGLE_API_USE_MTLS_ENDPOINT and
    GOOGLE_API_USE_CLIENT_CERTIFICATE drive transport construction:
    explicit api_endpoint, never/always mTLS, unsupported env values, and
    quota_project_id forwarding."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(CloudChannelServiceClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(CloudChannelServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            CloudChannelServiceClient,
            transports.CloudChannelServiceGrpcTransport,
            "grpc",
            "true",
        ),
        (
            CloudChannelServiceAsyncClient,
            transports.CloudChannelServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            CloudChannelServiceClient,
            transports.CloudChannelServiceGrpcTransport,
            "grpc",
            "false",
        ),
        (
            CloudChannelServiceAsyncClient,
            transports.CloudChannelServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    CloudChannelServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CloudChannelServiceClient),
)
@mock.patch.object(
    CloudChannelServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CloudChannelServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_cloud_channel_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the
    mTLS endpoint exactly when a client certificate (explicit or ADC) is
    available AND GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class", [CloudChannelServiceClient, CloudChannelServiceAsyncClient]
)
@mock.patch.object(
    CloudChannelServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CloudChannelServiceClient),
)
@mock.patch.object(
    CloudChannelServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CloudChannelServiceAsyncClient),
)
def test_cloud_channel_service_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source resolves (endpoint, cert source)
    for every combination of GOOGLE_API_USE_CLIENT_CERTIFICATE and
    GOOGLE_API_USE_MTLS_ENDPOINT, with and without an ADC default cert."""
    mock_client_cert_source = mock.Mock()

    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source

    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            CloudChannelServiceClient,
            transports.CloudChannelServiceGrpcTransport,
            "grpc",
        ),
        (
            CloudChannelServiceAsyncClient,
            transports.CloudChannelServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_cloud_channel_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via client_options are forwarded verbatim to the
    transport constructor."""
    scope_options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as mocked_init:
        mocked_init.return_value = None
        client = client_class(client_options=scope_options, transport=transport_name)
        mocked_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            CloudChannelServiceClient,
            transports.CloudChannelServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            CloudChannelServiceAsyncClient,
            transports.CloudChannelServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_cloud_channel_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file in client_options is forwarded verbatim to the
    transport constructor (credentials themselves stay None)."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")

    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_cloud_channel_service_client_client_options_from_dict():
    """client_options may be given as a plain dict instead of ClientOptions."""
    transport_init_path = (
        "google.cloud.channel_v1.services.cloud_channel_service.transports."
        "CloudChannelServiceGrpcTransport.__init__"
    )
    with mock.patch(transport_init_path) as mocked_init:
        mocked_init.return_value = None
        CloudChannelServiceClient(client_options={"api_endpoint": "squid.clam.whelk"})
        mocked_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            CloudChannelServiceClient,
            transports.CloudChannelServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            CloudChannelServiceAsyncClient,
            transports.CloudChannelServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_cloud_channel_service_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a credentials_file (not ADC) are the ones used
    when the gRPC channel is created, with the service's default scopes,
    host and channel options."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")

    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "cloudchannel.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/apps.order",),
            scopes=None,
            default_host="cloudchannel.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [service.ListCustomersRequest, dict,])
def test_list_customers(request_type, transport: str = "grpc"):
    """list_customers sends a ListCustomersRequest and wraps the mocked
    response in a ListCustomersPager."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_customers), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = service.ListCustomersResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_customers(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.ListCustomersRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListCustomersPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_customers_empty_call():
    """Calling list_customers() with no arguments still sends a default
    ListCustomersRequest (coverage failsafe for request=None)."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.list_customers), "__call__") as stub:
        client.list_customers()
        stub.assert_called()
        _, stub_args, _ = stub.mock_calls[0]
        assert stub_args[0] == service.ListCustomersRequest()
@pytest.mark.asyncio
async def test_list_customers_async(
    transport: str = "grpc_asyncio", request_type=service.ListCustomersRequest
):
    """Async variant: list_customers sends the request and returns a
    ListCustomersAsyncPager."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_customers), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListCustomersResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_customers(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.ListCustomersRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListCustomersAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_customers_async_from_dict():
    """Re-run the async test with the request given as a plain dict."""
    await test_list_customers_async(request_type=dict)
def test_list_customers_field_headers():
    """The request's parent field is sent as an x-goog-request-params
    routing header."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.ListCustomersRequest()
    request.parent = "parent/value"

    with mock.patch.object(type(client.transport.list_customers), "__call__") as stub:
        stub.return_value = service.ListCustomersResponse()
        client.list_customers(request)

        # The stub received exactly the request we built.
        assert len(stub.mock_calls) == 1
        _, stub_args, stub_kwargs = stub.mock_calls[0]
        assert stub_args[0] == request

        # The routing header carrying the parent was attached.
        assert ("x-goog-request-params", "parent=parent/value",) in stub_kwargs[
            "metadata"
        ]
@pytest.mark.asyncio
async def test_list_customers_field_headers_async():
    """Async variant: the parent field is sent as an x-goog-request-params
    routing header."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.ListCustomersRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_customers), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListCustomersResponse()
        )
        await client.list_customers(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_customers_pager(transport_name: str = "grpc"):
    """The sync pager iterates all customers across the mocked pages and
    carries the expected routing-header metadata."""
    client = CloudChannelServiceClient(
        # Fix: instantiate the credentials — the original passed the class
        # object itself, inconsistent with every other test in this module.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_customers), "__call__") as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager requesting more pages than provided.
        call.side_effect = (
            service.ListCustomersResponse(
                customers=[
                    customers.Customer(),
                    customers.Customer(),
                    customers.Customer(),
                ],
                next_page_token="abc",
            ),
            service.ListCustomersResponse(customers=[], next_page_token="def",),
            service.ListCustomersResponse(
                customers=[customers.Customer(),], next_page_token="ghi",
            ),
            service.ListCustomersResponse(
                customers=[customers.Customer(), customers.Customer(),],
            ),
            RuntimeError,
        )

        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_customers(request={})

        assert pager._metadata == metadata

        # 3 + 0 + 1 + 2 customers across the four pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, customers.Customer) for i in results)
def test_list_customers_pages(transport_name: str = "grpc"):
    """``pager.pages`` exposes each raw page with its ``next_page_token``.

    Fix: instantiate ``AnonymousCredentials()`` instead of passing the class
    object itself, matching every other test in this module.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_customers), "__call__") as call:
        # Set the response to a series of pages; RuntimeError marks exhaustion.
        call.side_effect = (
            service.ListCustomersResponse(
                customers=[
                    customers.Customer(),
                    customers.Customer(),
                    customers.Customer(),
                ],
                next_page_token="abc",
            ),
            service.ListCustomersResponse(customers=[], next_page_token="def"),
            service.ListCustomersResponse(
                customers=[customers.Customer()], next_page_token="ghi",
            ),
            service.ListCustomersResponse(
                customers=[customers.Customer(), customers.Customer()],
            ),
            RuntimeError,
        )
        pages = list(client.list_customers(request={}).pages)
        # The final page has no next_page_token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_customers_async_pager():
    """The async pager flattens every page when iterated with ``async for``.

    Fix: instantiate ``AnonymousCredentials()`` instead of passing the class
    object itself, matching every other test in this module.
    """
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_customers), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages; RuntimeError marks exhaustion.
        call.side_effect = (
            service.ListCustomersResponse(
                customers=[
                    customers.Customer(),
                    customers.Customer(),
                    customers.Customer(),
                ],
                next_page_token="abc",
            ),
            service.ListCustomersResponse(customers=[], next_page_token="def"),
            service.ListCustomersResponse(
                customers=[customers.Customer()], next_page_token="ghi",
            ),
            service.ListCustomersResponse(
                customers=[customers.Customer(), customers.Customer()],
            ),
            RuntimeError,
        )
        async_pager = await client.list_customers(request={})
        assert async_pager.next_page_token == "abc"

        # Collect every customer across all pages.
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, customers.Customer) for i in responses)
@pytest.mark.asyncio
async def test_list_customers_async_pages():
    """Async ``pager.pages`` exposes each raw page with its token.

    Fix: instantiate ``AnonymousCredentials()`` instead of passing the class
    object itself, matching every other test in this module.
    """
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_customers), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages; RuntimeError marks exhaustion.
        call.side_effect = (
            service.ListCustomersResponse(
                customers=[
                    customers.Customer(),
                    customers.Customer(),
                    customers.Customer(),
                ],
                next_page_token="abc",
            ),
            service.ListCustomersResponse(customers=[], next_page_token="def"),
            service.ListCustomersResponse(
                customers=[customers.Customer()], next_page_token="ghi",
            ),
            service.ListCustomersResponse(
                customers=[customers.Customer(), customers.Customer()],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_customers(request={})).pages:
            pages.append(page_)
        # The final page has no next_page_token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [service.GetCustomerRequest, dict])
def test_get_customer(request_type, transport: str = "grpc"):
    """get_customer forwards the request and returns the transport's proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 treats every field as optional, so an empty request is valid.
    request = request_type()

    # Fields the fake response populates; each holds "<field>_value".
    populated = (
        "name",
        "org_display_name",
        "alternate_email",
        "domain",
        "cloud_identity_id",
        "language_code",
        "channel_partner_id",
    )
    with mock.patch.object(type(client.transport.get_customer), "__call__") as rpc:
        rpc.return_value = customers.Customer(
            **{field: field + "_value" for field in populated}
        )
        response = client.get_customer(request)

        # Exactly one underlying call, carrying the canonical request proto.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.GetCustomerRequest()

    # The wrapper hands back the proto unchanged.
    assert isinstance(response, customers.Customer)
    for field in populated:
        assert getattr(response, field) == field + "_value"
def test_get_customer_empty_call():
    """Calling with no arguments must still send a default request proto.

    Coverage failsafe: request=None plus no flattened fields must work.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(type(client.transport.get_customer), "__call__") as rpc:
        client.get_customer()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.GetCustomerRequest()
@pytest.mark.asyncio
async def test_get_customer_async(
    transport: str = "grpc_asyncio", request_type=service.GetCustomerRequest
):
    """Async get_customer forwards the request and unpacks the awaited proto."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 treats every field as optional, so an empty request is valid.
    request = request_type()

    # Fields the fake response populates; each holds "<field>_value".
    populated = (
        "name",
        "org_display_name",
        "alternate_email",
        "domain",
        "cloud_identity_id",
        "language_code",
        "channel_partner_id",
    )
    with mock.patch.object(type(client.transport.get_customer), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            customers.Customer(**{field: field + "_value" for field in populated})
        )
        response = await client.get_customer(request)

        # The stub was invoked with the canonical request proto.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.GetCustomerRequest()

    # The wrapper hands back the proto unchanged.
    assert isinstance(response, customers.Customer)
    for field in populated:
        assert getattr(response, field) == field + "_value"
@pytest.mark.asyncio
async def test_get_customer_async_from_dict():
    """Re-run the async test with a plain ``dict`` request payload."""
    await test_get_customer_async(request_type=dict)
def test_get_customer_field_headers():
    """Routing headers derived from ``name`` must reach the transport."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound field so a routing header is generated.
    request = service.GetCustomerRequest()
    request.name = "name/value"

    with mock.patch.object(type(client.transport.get_customer), "__call__") as rpc:
        rpc.return_value = customers.Customer()
        client.get_customer(request)

        # The stub saw exactly the request we sent.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing metadata mirrors the populated field.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = ("x-goog-request-params", "name=name/value")
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_customer_field_headers_async():
    """Routing headers derived from ``name`` must reach the async transport."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound field so a routing header is generated.
    request = service.GetCustomerRequest()
    request.name = "name/value"

    with mock.patch.object(type(client.transport.get_customer), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(customers.Customer())
        await client.get_customer(request)

        # The stub saw exactly the request we sent.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing metadata mirrors the populated field.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = ("x-goog-request-params", "name=name/value")
    assert expected_header in call_kwargs["metadata"]
def test_get_customer_flattened():
    """The flattened ``name`` keyword populates the request proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.get_customer), "__call__") as rpc:
        rpc.return_value = customers.Customer()
        # Invoke via the flattened keyword form rather than a request object.
        client.get_customer(name="name_value")

        # One underlying call whose request carries the flattened value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_get_customer_flattened_error():
    """Passing a request object together with flattened fields is rejected."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing the two calling conventions must raise.
    with pytest.raises(ValueError):
        client.get_customer(service.GetCustomerRequest(), name="name_value")
@pytest.mark.asyncio
async def test_get_customer_flattened_async():
    """The flattened ``name`` keyword populates the async request proto."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_customer), "__call__") as call:
        # Designate an appropriate return value for the call.  (A dead
        # ``call.return_value = customers.Customer()`` assignment that was
        # immediately overwritten has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(customers.Customer())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_customer(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_customer_flattened_error_async():
    """Passing a request object together with flattened fields is rejected."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing the two calling conventions must raise.
    with pytest.raises(ValueError):
        await client.get_customer(service.GetCustomerRequest(), name="name_value")
@pytest.mark.parametrize(
    "request_type", [service.CheckCloudIdentityAccountsExistRequest, dict]
)
def test_check_cloud_identity_accounts_exist(request_type, transport: str = "grpc"):
    """The RPC forwards the request and returns the transport's response."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 treats every field as optional, so an empty request is valid.
    request = request_type()

    with mock.patch.object(
        type(client.transport.check_cloud_identity_accounts_exist), "__call__"
    ) as rpc:
        rpc.return_value = service.CheckCloudIdentityAccountsExistResponse()
        response = client.check_cloud_identity_accounts_exist(request)

        # Exactly one underlying call, carrying the canonical request proto.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.CheckCloudIdentityAccountsExistRequest()

    # The wrapper hands back the response type unchanged.
    assert isinstance(response, service.CheckCloudIdentityAccountsExistResponse)
def test_check_cloud_identity_accounts_exist_empty_call():
    """Calling with no arguments must still send a default request proto.

    Coverage failsafe: request=None plus no flattened fields must work.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(
        type(client.transport.check_cloud_identity_accounts_exist), "__call__"
    ) as rpc:
        client.check_cloud_identity_accounts_exist()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.CheckCloudIdentityAccountsExistRequest()
@pytest.mark.asyncio
async def test_check_cloud_identity_accounts_exist_async(
    transport: str = "grpc_asyncio",
    request_type=service.CheckCloudIdentityAccountsExistRequest,
):
    """Async variant forwards the request and unpacks the awaited response."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 treats every field as optional, so an empty request is valid.
    request = request_type()

    with mock.patch.object(
        type(client.transport.check_cloud_identity_accounts_exist), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.CheckCloudIdentityAccountsExistResponse()
        )
        response = await client.check_cloud_identity_accounts_exist(request)

        # The stub was invoked with the canonical request proto.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.CheckCloudIdentityAccountsExistRequest()

    # The wrapper hands back the response type unchanged.
    assert isinstance(response, service.CheckCloudIdentityAccountsExistResponse)
@pytest.mark.asyncio
async def test_check_cloud_identity_accounts_exist_async_from_dict():
    """Re-run the async test with a plain ``dict`` request payload."""
    await test_check_cloud_identity_accounts_exist_async(request_type=dict)
def test_check_cloud_identity_accounts_exist_field_headers():
    """Routing headers derived from ``parent`` must reach the transport."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound field so a routing header is generated.
    request = service.CheckCloudIdentityAccountsExistRequest()
    request.parent = "parent/value"

    with mock.patch.object(
        type(client.transport.check_cloud_identity_accounts_exist), "__call__"
    ) as rpc:
        rpc.return_value = service.CheckCloudIdentityAccountsExistResponse()
        client.check_cloud_identity_accounts_exist(request)

        # The stub saw exactly the request we sent.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing metadata mirrors the populated field.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = ("x-goog-request-params", "parent=parent/value")
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_check_cloud_identity_accounts_exist_field_headers_async():
    """Routing headers derived from ``parent`` must reach the async transport."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound field so a routing header is generated.
    request = service.CheckCloudIdentityAccountsExistRequest()
    request.parent = "parent/value"

    with mock.patch.object(
        type(client.transport.check_cloud_identity_accounts_exist), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.CheckCloudIdentityAccountsExistResponse()
        )
        await client.check_cloud_identity_accounts_exist(request)

        # The stub saw exactly the request we sent.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing metadata mirrors the populated field.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = ("x-goog-request-params", "parent=parent/value")
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.parametrize("request_type", [service.CreateCustomerRequest, dict])
def test_create_customer(request_type, transport: str = "grpc"):
    """create_customer forwards the request and returns the transport's proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 treats every field as optional, so an empty request is valid.
    request = request_type()

    # Fields the fake response populates; each holds "<field>_value".
    populated = (
        "name",
        "org_display_name",
        "alternate_email",
        "domain",
        "cloud_identity_id",
        "language_code",
        "channel_partner_id",
    )
    with mock.patch.object(type(client.transport.create_customer), "__call__") as rpc:
        rpc.return_value = customers.Customer(
            **{field: field + "_value" for field in populated}
        )
        response = client.create_customer(request)

        # Exactly one underlying call, carrying the canonical request proto.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.CreateCustomerRequest()

    # The wrapper hands back the proto unchanged.
    assert isinstance(response, customers.Customer)
    for field in populated:
        assert getattr(response, field) == field + "_value"
def test_create_customer_empty_call():
    """Calling with no arguments must still send a default request proto.

    Coverage failsafe: request=None plus no flattened fields must work.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(type(client.transport.create_customer), "__call__") as rpc:
        client.create_customer()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.CreateCustomerRequest()
@pytest.mark.asyncio
async def test_create_customer_async(
    transport: str = "grpc_asyncio", request_type=service.CreateCustomerRequest
):
    """Async create_customer forwards the request and unpacks the awaited proto."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 treats every field as optional, so an empty request is valid.
    request = request_type()

    # Fields the fake response populates; each holds "<field>_value".
    populated = (
        "name",
        "org_display_name",
        "alternate_email",
        "domain",
        "cloud_identity_id",
        "language_code",
        "channel_partner_id",
    )
    with mock.patch.object(type(client.transport.create_customer), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            customers.Customer(**{field: field + "_value" for field in populated})
        )
        response = await client.create_customer(request)

        # The stub was invoked with the canonical request proto.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.CreateCustomerRequest()

    # The wrapper hands back the proto unchanged.
    assert isinstance(response, customers.Customer)
    for field in populated:
        assert getattr(response, field) == field + "_value"
@pytest.mark.asyncio
async def test_create_customer_async_from_dict():
    """Re-run the async test with a plain ``dict`` request payload."""
    await test_create_customer_async(request_type=dict)
def test_create_customer_field_headers():
    """Routing headers derived from ``parent`` must reach the transport."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound field so a routing header is generated.
    request = service.CreateCustomerRequest()
    request.parent = "parent/value"

    with mock.patch.object(type(client.transport.create_customer), "__call__") as rpc:
        rpc.return_value = customers.Customer()
        client.create_customer(request)

        # The stub saw exactly the request we sent.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing metadata mirrors the populated field.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = ("x-goog-request-params", "parent=parent/value")
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_customer_field_headers_async():
    """Routing headers derived from ``parent`` must reach the async transport."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound field so a routing header is generated.
    request = service.CreateCustomerRequest()
    request.parent = "parent/value"

    with mock.patch.object(type(client.transport.create_customer), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(customers.Customer())
        await client.create_customer(request)

        # The stub saw exactly the request we sent.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing metadata mirrors the populated field.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = ("x-goog-request-params", "parent=parent/value")
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.parametrize("request_type", [service.UpdateCustomerRequest, dict])
def test_update_customer(request_type, transport: str = "grpc"):
    """update_customer forwards the request and returns the transport's proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 treats every field as optional, so an empty request is valid.
    request = request_type()

    # Fields the fake response populates; each holds "<field>_value".
    populated = (
        "name",
        "org_display_name",
        "alternate_email",
        "domain",
        "cloud_identity_id",
        "language_code",
        "channel_partner_id",
    )
    with mock.patch.object(type(client.transport.update_customer), "__call__") as rpc:
        rpc.return_value = customers.Customer(
            **{field: field + "_value" for field in populated}
        )
        response = client.update_customer(request)

        # Exactly one underlying call, carrying the canonical request proto.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.UpdateCustomerRequest()

    # The wrapper hands back the proto unchanged.
    assert isinstance(response, customers.Customer)
    for field in populated:
        assert getattr(response, field) == field + "_value"
def test_update_customer_empty_call():
    """Calling with no arguments must still send a default request proto.

    Coverage failsafe: request=None plus no flattened fields must work.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(type(client.transport.update_customer), "__call__") as rpc:
        client.update_customer()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.UpdateCustomerRequest()
@pytest.mark.asyncio
async def test_update_customer_async(
    transport: str = "grpc_asyncio", request_type=service.UpdateCustomerRequest
):
    """Async update_customer forwards the request and unpacks the awaited proto."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 treats every field as optional, so an empty request is valid.
    request = request_type()

    # Fields the fake response populates; each holds "<field>_value".
    populated = (
        "name",
        "org_display_name",
        "alternate_email",
        "domain",
        "cloud_identity_id",
        "language_code",
        "channel_partner_id",
    )
    with mock.patch.object(type(client.transport.update_customer), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            customers.Customer(**{field: field + "_value" for field in populated})
        )
        response = await client.update_customer(request)

        # The stub was invoked with the canonical request proto.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.UpdateCustomerRequest()

    # The wrapper hands back the proto unchanged.
    assert isinstance(response, customers.Customer)
    for field in populated:
        assert getattr(response, field) == field + "_value"
@pytest.mark.asyncio
async def test_update_customer_async_from_dict():
    """Re-run the async test with a plain ``dict`` request payload."""
    await test_update_customer_async(request_type=dict)
def test_update_customer_field_headers():
    """Routing headers derived from ``customer.name`` must reach the transport."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound nested field so a routing header is generated.
    request = service.UpdateCustomerRequest()
    request.customer.name = "customer.name/value"

    with mock.patch.object(type(client.transport.update_customer), "__call__") as rpc:
        rpc.return_value = customers.Customer()
        client.update_customer(request)

        # The stub saw exactly the request we sent.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing metadata mirrors the populated nested field.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = ("x-goog-request-params", "customer.name=customer.name/value")
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_update_customer_field_headers_async():
    """Routing headers from ``customer.name`` must reach the async transport."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound nested field so a routing header is generated.
    request = service.UpdateCustomerRequest()
    request.customer.name = "customer.name/value"

    with mock.patch.object(type(client.transport.update_customer), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(customers.Customer())
        await client.update_customer(request)

        # The stub saw exactly the request we sent.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing metadata mirrors the populated nested field.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = ("x-goog-request-params", "customer.name=customer.name/value")
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.parametrize("request_type", [service.DeleteCustomerRequest, dict])
def test_delete_customer(request_type, transport: str = "grpc"):
    """delete_customer forwards the request and yields ``None``."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 treats every field as optional, so an empty request is valid.
    request = request_type()

    with mock.patch.object(type(client.transport.delete_customer), "__call__") as rpc:
        rpc.return_value = None
        response = client.delete_customer(request)

        # Exactly one underlying call, carrying the canonical request proto.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.DeleteCustomerRequest()

    # Empty-response RPC: the surface returns None.
    assert response is None
def test_delete_customer_empty_call():
    """Calling with no arguments must still send a default request proto.

    Coverage failsafe: request=None plus no flattened fields must work.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(type(client.transport.delete_customer), "__call__") as rpc:
        client.delete_customer()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.DeleteCustomerRequest()
@pytest.mark.asyncio
async def test_delete_customer_async(
    transport: str = "grpc_asyncio", request_type=service.DeleteCustomerRequest
):
    """Async delete_customer forwards the request and resolves to ``None``."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 treats every field as optional, so an empty request is valid.
    request = request_type()

    with mock.patch.object(type(client.transport.delete_customer), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_customer(request)

        # The stub was invoked with the canonical request proto.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.DeleteCustomerRequest()

    # Empty-response RPC: the surface resolves to None.
    assert response is None
@pytest.mark.asyncio
async def test_delete_customer_async_from_dict():
    """Re-run the async test with a plain ``dict`` request payload."""
    await test_delete_customer_async(request_type=dict)
def test_delete_customer_field_headers():
    """Routing headers derived from ``name`` must reach the transport."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound field so a routing header is generated.
    request = service.DeleteCustomerRequest()
    request.name = "name/value"

    with mock.patch.object(type(client.transport.delete_customer), "__call__") as rpc:
        rpc.return_value = None
        client.delete_customer(request)

        # The stub saw exactly the request we sent.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing metadata mirrors the populated field.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = ("x-goog-request-params", "name=name/value")
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_delete_customer_field_headers_async():
    """Routing headers derived from ``name`` must reach the async transport."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound field so a routing header is generated.
    request = service.DeleteCustomerRequest()
    request.name = "name/value"

    with mock.patch.object(type(client.transport.delete_customer), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_customer(request)

        # The stub saw exactly the request we sent.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing metadata mirrors the populated field.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = ("x-goog-request-params", "name=name/value")
    assert expected_header in call_kwargs["metadata"]
def test_delete_customer_flattened():
    """A flattened `name` keyword is copied into the request proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the gRPC stub and give it a canned (None) response.
    with mock.patch.object(type(client.transport.delete_customer), "__call__") as rpc:
        rpc.return_value = None
        # Invoke with a truthy value for each flattened field.
        client.delete_customer(name="name_value",)
        # Exactly one call, whose request proto carries the flattened value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_delete_customer_flattened_error():
    """Supplying both a request object and flattened fields must raise."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing a request proto with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        client.delete_customer(
            service.DeleteCustomerRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_customer_flattened_async():
    """Async variant: a flattened `name` keyword is copied into the request proto."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_customer), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A dead `call.return_value = None` that was immediately overwritten
        # has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_customer(name="name_value",)
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_customer_flattened_error_async():
    """Async variant: request object plus flattened fields must raise."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing a request proto with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        await client.delete_customer(
            service.DeleteCustomerRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [service.ImportCustomerRequest, dict,])
def test_import_customer(request_type, transport: str = "grpc"):
    """ImportCustomer returns a Customer populated from the stubbed response."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # proto3 fields are all optional at runtime, so an empty request suffices.
    req = request_type()
    # Patch the gRPC stub and hand it a canned Customer.
    with mock.patch.object(type(client.transport.import_customer), "__call__") as rpc:
        rpc.return_value = customers.Customer(
            name="name_value",
            org_display_name="org_display_name_value",
            alternate_email="alternate_email_value",
            domain="domain_value",
            cloud_identity_id="cloud_identity_id_value",
            language_code="language_code_value",
            channel_partner_id="channel_partner_id_value",
        )
        response = client.import_customer(req)
        # Exactly one invocation, with the default request proto.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ImportCustomerRequest()
    # Every canned field must round-trip onto the returned Customer.
    assert isinstance(response, customers.Customer)
    assert response.name == "name_value"
    assert response.org_display_name == "org_display_name_value"
    assert response.alternate_email == "alternate_email_value"
    assert response.domain == "domain_value"
    assert response.cloud_identity_id == "cloud_identity_id_value"
    assert response.language_code == "language_code_value"
    assert response.channel_partner_id == "channel_partner_id_value"
def test_import_customer_empty_call():
    """Coverage failsafe: calling with no request and no flattened fields works."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub; a bare call must still produce a default request.
    with mock.patch.object(type(client.transport.import_customer), "__call__") as rpc:
        client.import_customer()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ImportCustomerRequest()
@pytest.mark.asyncio
async def test_import_customer_async(
    transport: str = "grpc_asyncio", request_type=service.ImportCustomerRequest
):
    """Async variant: ImportCustomer returns the stubbed Customer."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # proto3 fields are all optional at runtime, so an empty request suffices.
    req = request_type()
    # Patch the gRPC stub; wrap the canned Customer in a fake awaitable call.
    with mock.patch.object(type(client.transport.import_customer), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            customers.Customer(
                name="name_value",
                org_display_name="org_display_name_value",
                alternate_email="alternate_email_value",
                domain="domain_value",
                cloud_identity_id="cloud_identity_id_value",
                language_code="language_code_value",
                channel_partner_id="channel_partner_id_value",
            )
        )
        response = await client.import_customer(req)
        # The stub was invoked with the default request proto.
        assert rpc.mock_calls
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ImportCustomerRequest()
    # Every canned field must round-trip onto the returned Customer.
    assert isinstance(response, customers.Customer)
    assert response.name == "name_value"
    assert response.org_display_name == "org_display_name_value"
    assert response.alternate_email == "alternate_email_value"
    assert response.domain == "domain_value"
    assert response.cloud_identity_id == "cloud_identity_id_value"
    assert response.language_code == "language_code_value"
    assert response.channel_partner_id == "channel_partner_id_value"
@pytest.mark.asyncio
async def test_import_customer_async_from_dict():
    """Re-run the async import test with the request supplied as a plain dict."""
    await test_import_customer_async(request_type=dict)
def test_import_customer_field_headers():
    """The request's `parent` must be echoed as an x-goog-request-params header."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field; it has to travel as a routing header.
    req = service.ImportCustomerRequest()
    req.parent = "parent/value"
    # Patch the gRPC stub so no real network traffic happens.
    with mock.patch.object(type(client.transport.import_customer), "__call__") as rpc:
        rpc.return_value = customers.Customer()
        client.import_customer(req)
        # Exactly one RPC was issued, carrying the original request object.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == req
    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_import_customer_field_headers_async():
    """Async variant: `parent` must be echoed as an x-goog-request-params header."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field; it has to travel as a routing header.
    req = service.ImportCustomerRequest()
    req.parent = "parent/value"
    # Patch the gRPC stub so no real network traffic happens.
    with mock.patch.object(type(client.transport.import_customer), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(customers.Customer())
        await client.import_customer(req)
        # The stub was invoked, carrying the original request object.
        assert rpc.mock_calls
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == req
    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [service.ProvisionCloudIdentityRequest, dict,])
def test_provision_cloud_identity(request_type, transport: str = "grpc"):
    """ProvisionCloudIdentity returns a long-running-operation future."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # proto3 fields are all optional at runtime, so an empty request suffices.
    req = request_type()
    # Patch the gRPC stub and hand it a canned Operation proto.
    with mock.patch.object(
        type(client.transport.provision_cloud_identity), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.provision_cloud_identity(req)
        # Exactly one invocation, with the default request proto.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ProvisionCloudIdentityRequest()
    # The client wraps the Operation in an LRO future.
    assert isinstance(response, future.Future)
def test_provision_cloud_identity_empty_call():
    """Coverage failsafe: calling with no request and no flattened fields works."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub; a bare call must still produce a default request.
    with mock.patch.object(
        type(client.transport.provision_cloud_identity), "__call__"
    ) as rpc:
        client.provision_cloud_identity()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ProvisionCloudIdentityRequest()
@pytest.mark.asyncio
async def test_provision_cloud_identity_async(
    transport: str = "grpc_asyncio", request_type=service.ProvisionCloudIdentityRequest
):
    """Async variant: ProvisionCloudIdentity returns an LRO future."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # proto3 fields are all optional at runtime, so an empty request suffices.
    req = request_type()
    # Patch the gRPC stub; wrap the canned Operation in a fake awaitable call.
    with mock.patch.object(
        type(client.transport.provision_cloud_identity), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.provision_cloud_identity(req)
        # The stub was invoked with the default request proto.
        assert rpc.mock_calls
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ProvisionCloudIdentityRequest()
    # The client wraps the Operation in an LRO future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_provision_cloud_identity_async_from_dict():
    """Re-run the async provision test with the request supplied as a plain dict."""
    await test_provision_cloud_identity_async(request_type=dict)
def test_provision_cloud_identity_field_headers():
    """The request's `customer` must be echoed as an x-goog-request-params header."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field; it has to travel as a routing header.
    req = service.ProvisionCloudIdentityRequest()
    req.customer = "customer/value"
    # Patch the gRPC stub so no real network traffic happens.
    with mock.patch.object(
        type(client.transport.provision_cloud_identity), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.provision_cloud_identity(req)
        # Exactly one RPC was issued, carrying the original request object.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == req
    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "customer=customer/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_provision_cloud_identity_field_headers_async():
    """Async variant: `customer` must be echoed as an x-goog-request-params header."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field; it has to travel as a routing header.
    req = service.ProvisionCloudIdentityRequest()
    req.customer = "customer/value"
    # Patch the gRPC stub so no real network traffic happens.
    with mock.patch.object(
        type(client.transport.provision_cloud_identity), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.provision_cloud_identity(req)
        # The stub was invoked, carrying the original request object.
        assert rpc.mock_calls
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == req
    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "customer=customer/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [service.ListEntitlementsRequest, dict,])
def test_list_entitlements(request_type, transport: str = "grpc"):
    """ListEntitlements returns a pager exposing the stubbed next_page_token."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # proto3 fields are all optional at runtime, so an empty request suffices.
    req = request_type()
    # Patch the gRPC stub and hand it a canned response.
    with mock.patch.object(
        type(client.transport.list_entitlements), "__call__"
    ) as rpc:
        rpc.return_value = service.ListEntitlementsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_entitlements(req)
        # Exactly one invocation, with the default request proto.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ListEntitlementsRequest()
    # The client wraps the response in a pager carrying the token.
    assert isinstance(response, pagers.ListEntitlementsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_entitlements_empty_call():
    """Coverage failsafe: calling with no request and no flattened fields works."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub; a bare call must still produce a default request.
    with mock.patch.object(
        type(client.transport.list_entitlements), "__call__"
    ) as rpc:
        client.list_entitlements()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ListEntitlementsRequest()
@pytest.mark.asyncio
async def test_list_entitlements_async(
    transport: str = "grpc_asyncio", request_type=service.ListEntitlementsRequest
):
    """Async variant: ListEntitlements returns an async pager with the token."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # proto3 fields are all optional at runtime, so an empty request suffices.
    req = request_type()
    # Patch the gRPC stub; wrap the canned response in a fake awaitable call.
    with mock.patch.object(
        type(client.transport.list_entitlements), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListEntitlementsResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_entitlements(req)
        # The stub was invoked with the default request proto.
        assert rpc.mock_calls
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ListEntitlementsRequest()
    # The client wraps the response in an async pager carrying the token.
    assert isinstance(response, pagers.ListEntitlementsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_entitlements_async_from_dict():
    """Re-run the async list test with the request supplied as a plain dict."""
    await test_list_entitlements_async(request_type=dict)
def test_list_entitlements_field_headers():
    """The request's `parent` must be echoed as an x-goog-request-params header."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field; it has to travel as a routing header.
    req = service.ListEntitlementsRequest()
    req.parent = "parent/value"
    # Patch the gRPC stub so no real network traffic happens.
    with mock.patch.object(
        type(client.transport.list_entitlements), "__call__"
    ) as rpc:
        rpc.return_value = service.ListEntitlementsResponse()
        client.list_entitlements(req)
        # Exactly one RPC was issued, carrying the original request object.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == req
    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_entitlements_field_headers_async():
    """Async variant: `parent` must be echoed as an x-goog-request-params header."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field; it has to travel as a routing header.
    req = service.ListEntitlementsRequest()
    req.parent = "parent/value"
    # Patch the gRPC stub so no real network traffic happens.
    with mock.patch.object(
        type(client.transport.list_entitlements), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListEntitlementsResponse()
        )
        await client.list_entitlements(req)
        # The stub was invoked, carrying the original request object.
        assert rpc.mock_calls
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == req
    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_entitlements_pager(transport_name: str = "grpc"):
    """The sync pager iterates every entitlement across all stubbed pages."""
    client = CloudChannelServiceClient(
        # Fix: AnonymousCredentials was passed as a class, not an instance;
        # instantiate it like every other test in this file does.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entitlements), "__call__"
    ) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 items).
        call.side_effect = (
            service.ListEntitlementsResponse(
                entitlements=[
                    entitlements.Entitlement(),
                    entitlements.Entitlement(),
                    entitlements.Entitlement(),
                ],
                next_page_token="abc",
            ),
            service.ListEntitlementsResponse(entitlements=[], next_page_token="def",),
            service.ListEntitlementsResponse(
                entitlements=[entitlements.Entitlement(),], next_page_token="ghi",
            ),
            service.ListEntitlementsResponse(
                entitlements=[entitlements.Entitlement(), entitlements.Entitlement(),],
            ),
            RuntimeError,
        )
        # The pager must carry the routing-header metadata for the request.
        expected_metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_entitlements(request={})
        assert pager._metadata == expected_metadata
        # Iterating the pager walks all pages and yields every item.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, entitlements.Entitlement) for i in results)
def test_list_entitlements_pages(transport_name: str = "grpc"):
    """Each page exposes the raw response's next_page_token."""
    client = CloudChannelServiceClient(
        # Fix: AnonymousCredentials was passed as a class, not an instance;
        # instantiate it like every other test in this file does.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entitlements), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            service.ListEntitlementsResponse(
                entitlements=[
                    entitlements.Entitlement(),
                    entitlements.Entitlement(),
                    entitlements.Entitlement(),
                ],
                next_page_token="abc",
            ),
            service.ListEntitlementsResponse(entitlements=[], next_page_token="def",),
            service.ListEntitlementsResponse(
                entitlements=[entitlements.Entitlement(),], next_page_token="ghi",
            ),
            service.ListEntitlementsResponse(
                entitlements=[entitlements.Entitlement(), entitlements.Entitlement(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_entitlements(request={}).pages)
        # Page tokens arrive in order; the final page has an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_entitlements_async_pager():
    """The async pager iterates every entitlement across all stubbed pages."""
    client = CloudChannelServiceAsyncClient(
        # Fix: AnonymousCredentials was passed as a class, not an instance;
        # instantiate it like every other test in this file does.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entitlements),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 items).
        call.side_effect = (
            service.ListEntitlementsResponse(
                entitlements=[
                    entitlements.Entitlement(),
                    entitlements.Entitlement(),
                    entitlements.Entitlement(),
                ],
                next_page_token="abc",
            ),
            service.ListEntitlementsResponse(entitlements=[], next_page_token="def",),
            service.ListEntitlementsResponse(
                entitlements=[entitlements.Entitlement(),], next_page_token="ghi",
            ),
            service.ListEntitlementsResponse(
                entitlements=[entitlements.Entitlement(), entitlements.Entitlement(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_entitlements(request={},)
        assert async_pager.next_page_token == "abc"
        # Async iteration walks all pages and yields every item.
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, entitlements.Entitlement) for i in responses)
@pytest.mark.asyncio
async def test_list_entitlements_async_pages():
    """Each async page exposes the raw response's next_page_token."""
    client = CloudChannelServiceAsyncClient(
        # Fix: AnonymousCredentials was passed as a class, not an instance;
        # instantiate it like every other test in this file does.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entitlements),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            service.ListEntitlementsResponse(
                entitlements=[
                    entitlements.Entitlement(),
                    entitlements.Entitlement(),
                    entitlements.Entitlement(),
                ],
                next_page_token="abc",
            ),
            service.ListEntitlementsResponse(entitlements=[], next_page_token="def",),
            service.ListEntitlementsResponse(
                entitlements=[entitlements.Entitlement(),], next_page_token="ghi",
            ),
            service.ListEntitlementsResponse(
                entitlements=[entitlements.Entitlement(), entitlements.Entitlement(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_entitlements(request={})).pages:
            pages.append(page_)
        # Page tokens arrive in order; the final page has an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [service.ListTransferableSkusRequest, dict,])
def test_list_transferable_skus(request_type, transport: str = "grpc"):
    """ListTransferableSkus returns a pager exposing the stubbed token."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # proto3 fields are all optional at runtime, so an empty request suffices.
    req = request_type()
    # Patch the gRPC stub and hand it a canned response.
    with mock.patch.object(
        type(client.transport.list_transferable_skus), "__call__"
    ) as rpc:
        rpc.return_value = service.ListTransferableSkusResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_transferable_skus(req)
        # Exactly one invocation, with the default request proto.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ListTransferableSkusRequest()
    # The client wraps the response in a pager carrying the token.
    assert isinstance(response, pagers.ListTransferableSkusPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_transferable_skus_empty_call():
    """Coverage failsafe: calling with no request and no flattened fields works."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub; a bare call must still produce a default request.
    with mock.patch.object(
        type(client.transport.list_transferable_skus), "__call__"
    ) as rpc:
        client.list_transferable_skus()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ListTransferableSkusRequest()
@pytest.mark.asyncio
async def test_list_transferable_skus_async(
    transport: str = "grpc_asyncio", request_type=service.ListTransferableSkusRequest
):
    """Async variant: ListTransferableSkus returns an async pager with the token."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # proto3 fields are all optional at runtime, so an empty request suffices.
    req = request_type()
    # Patch the gRPC stub; wrap the canned response in a fake awaitable call.
    with mock.patch.object(
        type(client.transport.list_transferable_skus), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListTransferableSkusResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_transferable_skus(req)
        # The stub was invoked with the default request proto.
        assert rpc.mock_calls
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ListTransferableSkusRequest()
    # The client wraps the response in an async pager carrying the token.
    assert isinstance(response, pagers.ListTransferableSkusAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_transferable_skus_async_from_dict():
    """Re-run the async list test with the request supplied as a plain dict."""
    await test_list_transferable_skus_async(request_type=dict)
def test_list_transferable_skus_field_headers():
    """The request's `parent` must be echoed as an x-goog-request-params header."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field; it has to travel as a routing header.
    req = service.ListTransferableSkusRequest()
    req.parent = "parent/value"
    # Patch the gRPC stub so no real network traffic happens.
    with mock.patch.object(
        type(client.transport.list_transferable_skus), "__call__"
    ) as rpc:
        rpc.return_value = service.ListTransferableSkusResponse()
        client.list_transferable_skus(req)
        # Exactly one RPC was issued, carrying the original request object.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == req
    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_transferable_skus_field_headers_async():
    """Async variant: `parent` must be echoed as an x-goog-request-params header."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field; it has to travel as a routing header.
    req = service.ListTransferableSkusRequest()
    req.parent = "parent/value"
    # Patch the gRPC stub so no real network traffic happens.
    with mock.patch.object(
        type(client.transport.list_transferable_skus), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListTransferableSkusResponse()
        )
        await client.list_transferable_skus(req)
        # The stub was invoked, carrying the original request object.
        assert rpc.mock_calls
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == req
    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_transferable_skus_pager(transport_name: str = "grpc"):
    """The sync pager iterates every transferable SKU across all stubbed pages."""
    client = CloudChannelServiceClient(
        # Fix: AnonymousCredentials was passed as a class, not an instance;
        # instantiate it like every other test in this file does.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_transferable_skus), "__call__"
    ) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 items).
        call.side_effect = (
            service.ListTransferableSkusResponse(
                transferable_skus=[
                    entitlements.TransferableSku(),
                    entitlements.TransferableSku(),
                    entitlements.TransferableSku(),
                ],
                next_page_token="abc",
            ),
            service.ListTransferableSkusResponse(
                transferable_skus=[], next_page_token="def",
            ),
            service.ListTransferableSkusResponse(
                transferable_skus=[entitlements.TransferableSku(),],
                next_page_token="ghi",
            ),
            service.ListTransferableSkusResponse(
                transferable_skus=[
                    entitlements.TransferableSku(),
                    entitlements.TransferableSku(),
                ],
            ),
            RuntimeError,
        )
        # The pager must carry the routing-header metadata for the request.
        expected_metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_transferable_skus(request={})
        assert pager._metadata == expected_metadata
        # Iterating the pager walks all pages and yields every item.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, entitlements.TransferableSku) for i in results)
def test_list_transferable_skus_pages(transport_name: str = "grpc"):
    """Each page exposes the raw response's next_page_token."""
    client = CloudChannelServiceClient(
        # Fix: AnonymousCredentials was passed as a class, not an instance;
        # instantiate it like every other test in this file does.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_transferable_skus), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            service.ListTransferableSkusResponse(
                transferable_skus=[
                    entitlements.TransferableSku(),
                    entitlements.TransferableSku(),
                    entitlements.TransferableSku(),
                ],
                next_page_token="abc",
            ),
            service.ListTransferableSkusResponse(
                transferable_skus=[], next_page_token="def",
            ),
            service.ListTransferableSkusResponse(
                transferable_skus=[entitlements.TransferableSku(),],
                next_page_token="ghi",
            ),
            service.ListTransferableSkusResponse(
                transferable_skus=[
                    entitlements.TransferableSku(),
                    entitlements.TransferableSku(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_transferable_skus(request={}).pages)
        # Page tokens arrive in order; the final page has an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_transferable_skus_async_pager():
    """The async pager iterates every transferable SKU across all stubbed pages."""
    client = CloudChannelServiceAsyncClient(
        # Fix: AnonymousCredentials was passed as a class, not an instance;
        # instantiate it like every other test in this file does.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_transferable_skus),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 items).
        call.side_effect = (
            service.ListTransferableSkusResponse(
                transferable_skus=[
                    entitlements.TransferableSku(),
                    entitlements.TransferableSku(),
                    entitlements.TransferableSku(),
                ],
                next_page_token="abc",
            ),
            service.ListTransferableSkusResponse(
                transferable_skus=[], next_page_token="def",
            ),
            service.ListTransferableSkusResponse(
                transferable_skus=[entitlements.TransferableSku(),],
                next_page_token="ghi",
            ),
            service.ListTransferableSkusResponse(
                transferable_skus=[
                    entitlements.TransferableSku(),
                    entitlements.TransferableSku(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_transferable_skus(request={},)
        assert async_pager.next_page_token == "abc"
        # Async iteration walks all pages and yields every item.
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, entitlements.TransferableSku) for i in responses)
@pytest.mark.asyncio
async def test_list_transferable_skus_async_pages():
    """Each async page exposes the raw response's next_page_token."""
    client = CloudChannelServiceAsyncClient(
        # Fix: AnonymousCredentials was passed as a class, not an instance;
        # instantiate it like every other test in this file does.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_transferable_skus),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            service.ListTransferableSkusResponse(
                transferable_skus=[
                    entitlements.TransferableSku(),
                    entitlements.TransferableSku(),
                    entitlements.TransferableSku(),
                ],
                next_page_token="abc",
            ),
            service.ListTransferableSkusResponse(
                transferable_skus=[], next_page_token="def",
            ),
            service.ListTransferableSkusResponse(
                transferable_skus=[entitlements.TransferableSku(),],
                next_page_token="ghi",
            ),
            service.ListTransferableSkusResponse(
                transferable_skus=[
                    entitlements.TransferableSku(),
                    entitlements.TransferableSku(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_transferable_skus(request={})).pages:
            pages.append(page_)
        # Page tokens arrive in order; the final page has an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [service.ListTransferableOffersRequest, dict,])
def test_list_transferable_offers(request_type, transport: str = "grpc"):
    """ListTransferableOffers returns a pager exposing the stubbed token."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # proto3 fields are all optional at runtime, so an empty request suffices.
    req = request_type()
    # Patch the gRPC stub and hand it a canned response.
    with mock.patch.object(
        type(client.transport.list_transferable_offers), "__call__"
    ) as rpc:
        rpc.return_value = service.ListTransferableOffersResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_transferable_offers(req)
        # Exactly one invocation, with the default request proto.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ListTransferableOffersRequest()
    # The client wraps the response in a pager carrying the token.
    assert isinstance(response, pagers.ListTransferableOffersPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_transferable_offers_empty_call():
    """Coverage failsafe: a call with request=None and no flattened fields
    must still reach the stub with a default request proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.list_transferable_offers), "__call__"
    ) as rpc:
        client.list_transferable_offers()
        rpc.assert_called()
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == service.ListTransferableOffersRequest()
@pytest.mark.asyncio
async def test_list_transferable_offers_async(
    transport: str = "grpc_asyncio", request_type=service.ListTransferableOffersRequest
):
    """Async variant: the RPC is awaited and an async pager is returned."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API is mocked.
    request = request_type()
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.list_transferable_offers), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListTransferableOffersResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_transferable_offers(request)
    # The stub was invoked with the expected request proto.
    assert len(rpc.mock_calls)
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == service.ListTransferableOffersRequest()
    # The client wraps the raw response in an async pager.
    assert isinstance(response, pagers.ListTransferableOffersAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_transferable_offers_async_from_dict():
    # Re-run the async test with a plain dict request to cover dict coercion.
    await test_list_transferable_offers_async(request_type=dict)
def test_list_transferable_offers_field_headers():
    """Routing fields in the request must be sent as x-goog-request-params."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-routed field so a routing header is generated.
    request = service.ListTransferableOffersRequest()
    request.parent = "parent/value"
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.list_transferable_offers), "__call__"
    ) as rpc:
        rpc.return_value = service.ListTransferableOffersResponse()
        client.list_transferable_offers(request)
    # The stub was invoked exactly once with the unmodified request.
    assert len(rpc.mock_calls) == 1
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == request
    # The routing header must carry the populated field.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_transferable_offers_field_headers_async():
    """Async variant: routing fields must be sent as x-goog-request-params."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-routed field so a routing header is generated.
    request = service.ListTransferableOffersRequest()
    request.parent = "parent/value"
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.list_transferable_offers), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListTransferableOffersResponse()
        )
        await client.list_transferable_offers(request)
    # The stub was invoked with the unmodified request.
    assert len(rpc.mock_calls)
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == request
    # The routing header must carry the populated field.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_transferable_offers_pager(transport_name: str = "grpc"):
    """Verify the sync pager flattens items across pages and carries routing metadata."""
    client = CloudChannelServiceClient(
        # Instantiate the credentials: sibling tests pass an
        # AnonymousCredentials *instance*, not the class object.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_transferable_offers), "__call__"
    ) as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager fetching more pages than were mocked.
        call.side_effect = (
            service.ListTransferableOffersResponse(
                transferable_offers=[
                    service.TransferableOffer(),
                    service.TransferableOffer(),
                    service.TransferableOffer(),
                ],
                next_page_token="abc",
            ),
            service.ListTransferableOffersResponse(
                transferable_offers=[], next_page_token="def",
            ),
            service.ListTransferableOffersResponse(
                transferable_offers=[service.TransferableOffer(),],
                next_page_token="ghi",
            ),
            service.ListTransferableOffersResponse(
                transferable_offers=[
                    service.TransferableOffer(),
                    service.TransferableOffer(),
                ],
            ),
            RuntimeError,
        )
        # The pager must carry the routing header for the (empty) parent field.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_transferable_offers(request={})
        assert pager._metadata == metadata
        # Iterating the pager flattens all 6 items across the 4 pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, service.TransferableOffer) for i in results)
def test_list_transferable_offers_pages(transport_name: str = "grpc"):
    """Verify the sync pager exposes one page per mocked response, in order."""
    client = CloudChannelServiceClient(
        # Instantiate the credentials: sibling tests pass an
        # AnonymousCredentials *instance*, not the class object.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_transferable_offers), "__call__"
    ) as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager fetching more pages than were mocked.
        call.side_effect = (
            service.ListTransferableOffersResponse(
                transferable_offers=[
                    service.TransferableOffer(),
                    service.TransferableOffer(),
                    service.TransferableOffer(),
                ],
                next_page_token="abc",
            ),
            service.ListTransferableOffersResponse(
                transferable_offers=[], next_page_token="def",
            ),
            service.ListTransferableOffersResponse(
                transferable_offers=[service.TransferableOffer(),],
                next_page_token="ghi",
            ),
            service.ListTransferableOffersResponse(
                transferable_offers=[
                    service.TransferableOffer(),
                    service.TransferableOffer(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_transferable_offers(request={}).pages)
        # The last page carries no token, hence the trailing empty string.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_transferable_offers_async_pager():
    """Verify the async pager flattens items across all mocked pages."""
    client = CloudChannelServiceAsyncClient(
        # Instantiate the credentials: sibling tests pass an
        # AnonymousCredentials *instance*, not the class object.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_transferable_offers),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager fetching more pages than were mocked.
        call.side_effect = (
            service.ListTransferableOffersResponse(
                transferable_offers=[
                    service.TransferableOffer(),
                    service.TransferableOffer(),
                    service.TransferableOffer(),
                ],
                next_page_token="abc",
            ),
            service.ListTransferableOffersResponse(
                transferable_offers=[], next_page_token="def",
            ),
            service.ListTransferableOffersResponse(
                transferable_offers=[service.TransferableOffer(),],
                next_page_token="ghi",
            ),
            service.ListTransferableOffersResponse(
                transferable_offers=[
                    service.TransferableOffer(),
                    service.TransferableOffer(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_transferable_offers(request={},)
        assert async_pager.next_page_token == "abc"
        # Async iteration flattens all 6 items across the 4 pages.
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, service.TransferableOffer) for i in responses)
@pytest.mark.asyncio
async def test_list_transferable_offers_async_pages():
    """Verify the async pager exposes one page per mocked response, in order."""
    client = CloudChannelServiceAsyncClient(
        # Instantiate the credentials: sibling tests pass an
        # AnonymousCredentials *instance*, not the class object.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_transferable_offers),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager fetching more pages than were mocked.
        call.side_effect = (
            service.ListTransferableOffersResponse(
                transferable_offers=[
                    service.TransferableOffer(),
                    service.TransferableOffer(),
                    service.TransferableOffer(),
                ],
                next_page_token="abc",
            ),
            service.ListTransferableOffersResponse(
                transferable_offers=[], next_page_token="def",
            ),
            service.ListTransferableOffersResponse(
                transferable_offers=[service.TransferableOffer(),],
                next_page_token="ghi",
            ),
            service.ListTransferableOffersResponse(
                transferable_offers=[
                    service.TransferableOffer(),
                    service.TransferableOffer(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_transferable_offers(request={})).pages:
            pages.append(page_)
        # The last page carries no token, hence the trailing empty string.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [service.GetEntitlementRequest, dict,])
def test_get_entitlement(request_type, transport: str = "grpc"):
    """GetEntitlement forwards the request and surfaces every response field."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API is mocked.
    request = request_type()
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.get_entitlement), "__call__") as rpc:
        rpc.return_value = entitlements.Entitlement(
            name="name_value",
            offer="offer_value",
            provisioning_state=entitlements.Entitlement.ProvisioningState.ACTIVE,
            suspension_reasons=[
                entitlements.Entitlement.SuspensionReason.RESELLER_INITIATED
            ],
            purchase_order_id="purchase_order_id_value",
        )
        response = client.get_entitlement(request)
    # The stub must have been invoked exactly once with the request proto.
    assert len(rpc.mock_calls) == 1
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == service.GetEntitlementRequest()
    # Every mocked field must round-trip through the client unchanged.
    assert isinstance(response, entitlements.Entitlement)
    assert response.name == "name_value"
    assert response.offer == "offer_value"
    assert (
        response.provisioning_state == entitlements.Entitlement.ProvisioningState.ACTIVE
    )
    assert response.suspension_reasons == [
        entitlements.Entitlement.SuspensionReason.RESELLER_INITIATED
    ]
    assert response.purchase_order_id == "purchase_order_id_value"
def test_get_entitlement_empty_call():
    """Coverage failsafe: a call with request=None and no flattened fields
    must still reach the stub with a default request proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.get_entitlement), "__call__") as rpc:
        client.get_entitlement()
        rpc.assert_called()
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == service.GetEntitlementRequest()
@pytest.mark.asyncio
async def test_get_entitlement_async(
    transport: str = "grpc_asyncio", request_type=service.GetEntitlementRequest
):
    """Async variant: GetEntitlement surfaces every response field."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API is mocked.
    request = request_type()
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.get_entitlement), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            entitlements.Entitlement(
                name="name_value",
                offer="offer_value",
                provisioning_state=entitlements.Entitlement.ProvisioningState.ACTIVE,
                suspension_reasons=[
                    entitlements.Entitlement.SuspensionReason.RESELLER_INITIATED
                ],
                purchase_order_id="purchase_order_id_value",
            )
        )
        response = await client.get_entitlement(request)
    # The stub was invoked with the expected request proto.
    assert len(rpc.mock_calls)
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == service.GetEntitlementRequest()
    # Every mocked field must round-trip through the client unchanged.
    assert isinstance(response, entitlements.Entitlement)
    assert response.name == "name_value"
    assert response.offer == "offer_value"
    assert (
        response.provisioning_state == entitlements.Entitlement.ProvisioningState.ACTIVE
    )
    assert response.suspension_reasons == [
        entitlements.Entitlement.SuspensionReason.RESELLER_INITIATED
    ]
    assert response.purchase_order_id == "purchase_order_id_value"
@pytest.mark.asyncio
async def test_get_entitlement_async_from_dict():
    # Re-run the async test with a plain dict request to cover dict coercion.
    await test_get_entitlement_async(request_type=dict)
def test_get_entitlement_field_headers():
    """Routing fields in the request must be sent as x-goog-request-params."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-routed field so a routing header is generated.
    request = service.GetEntitlementRequest()
    request.name = "name/value"
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.get_entitlement), "__call__") as rpc:
        rpc.return_value = entitlements.Entitlement()
        client.get_entitlement(request)
    # The stub was invoked exactly once with the unmodified request.
    assert len(rpc.mock_calls) == 1
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == request
    # The routing header must carry the populated field.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_entitlement_field_headers_async():
    """Async variant: routing fields must be sent as x-goog-request-params."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-routed field so a routing header is generated.
    request = service.GetEntitlementRequest()
    request.name = "name/value"
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.get_entitlement), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            entitlements.Entitlement()
        )
        await client.get_entitlement(request)
    # The stub was invoked with the unmodified request.
    assert len(rpc.mock_calls)
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == request
    # The routing header must carry the populated field.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [service.CreateEntitlementRequest, dict,])
def test_create_entitlement(request_type, transport: str = "grpc"):
    """CreateEntitlement forwards the request and returns an LRO future."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API is mocked.
    request = request_type()
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.create_entitlement), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.create_entitlement(request)
    # The stub must have been invoked exactly once with the request proto.
    assert len(rpc.mock_calls) == 1
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == service.CreateEntitlementRequest()
    # The client wraps the raw Operation in a future.
    assert isinstance(response, future.Future)
def test_create_entitlement_empty_call():
    """Coverage failsafe: a call with request=None and no flattened fields
    must still reach the stub with a default request proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.create_entitlement), "__call__"
    ) as rpc:
        client.create_entitlement()
        rpc.assert_called()
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == service.CreateEntitlementRequest()
@pytest.mark.asyncio
async def test_create_entitlement_async(
    transport: str = "grpc_asyncio", request_type=service.CreateEntitlementRequest
):
    """Async variant: CreateEntitlement returns an LRO future."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API is mocked.
    request = request_type()
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.create_entitlement), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.create_entitlement(request)
    # The stub was invoked with the expected request proto.
    assert len(rpc.mock_calls)
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == service.CreateEntitlementRequest()
    # The client wraps the raw Operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_entitlement_async_from_dict():
    # Re-run the async test with a plain dict request to cover dict coercion.
    await test_create_entitlement_async(request_type=dict)
def test_create_entitlement_field_headers():
    """Routing fields in the request must be sent as x-goog-request-params."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-routed field so a routing header is generated.
    request = service.CreateEntitlementRequest()
    request.parent = "parent/value"
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.create_entitlement), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.create_entitlement(request)
    # The stub was invoked exactly once with the unmodified request.
    assert len(rpc.mock_calls) == 1
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == request
    # The routing header must carry the populated field.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_entitlement_field_headers_async():
    """Async variant: routing fields must be sent as x-goog-request-params."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-routed field so a routing header is generated.
    request = service.CreateEntitlementRequest()
    request.parent = "parent/value"
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.create_entitlement), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.create_entitlement(request)
    # The stub was invoked with the unmodified request.
    assert len(rpc.mock_calls)
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == request
    # The routing header must carry the populated field.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [service.ChangeParametersRequest, dict,])
def test_change_parameters(request_type, transport: str = "grpc"):
    """ChangeParameters forwards the request and returns an LRO future."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API is mocked.
    request = request_type()
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.change_parameters), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.change_parameters(request)
    # The stub must have been invoked exactly once with the request proto.
    assert len(rpc.mock_calls) == 1
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == service.ChangeParametersRequest()
    # The client wraps the raw Operation in a future.
    assert isinstance(response, future.Future)
def test_change_parameters_empty_call():
    """Coverage failsafe: a call with request=None and no flattened fields
    must still reach the stub with a default request proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.change_parameters), "__call__"
    ) as rpc:
        client.change_parameters()
        rpc.assert_called()
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == service.ChangeParametersRequest()
@pytest.mark.asyncio
async def test_change_parameters_async(
    transport: str = "grpc_asyncio", request_type=service.ChangeParametersRequest
):
    """Async variant: ChangeParameters returns an LRO future."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API is mocked.
    request = request_type()
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.change_parameters), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.change_parameters(request)
    # The stub was invoked with the expected request proto.
    assert len(rpc.mock_calls)
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == service.ChangeParametersRequest()
    # The client wraps the raw Operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_change_parameters_async_from_dict():
    # Re-run the async test with a plain dict request to cover dict coercion.
    await test_change_parameters_async(request_type=dict)
def test_change_parameters_field_headers():
    """Routing fields in the request must be sent as x-goog-request-params."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-routed field so a routing header is generated.
    request = service.ChangeParametersRequest()
    request.name = "name/value"
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.change_parameters), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.change_parameters(request)
    # The stub was invoked exactly once with the unmodified request.
    assert len(rpc.mock_calls) == 1
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == request
    # The routing header must carry the populated field.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_change_parameters_field_headers_async():
    """Async variant: routing fields must be sent as x-goog-request-params."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-routed field so a routing header is generated.
    request = service.ChangeParametersRequest()
    request.name = "name/value"
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.change_parameters), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.change_parameters(request)
    # The stub was invoked with the unmodified request.
    assert len(rpc.mock_calls)
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == request
    # The routing header must carry the populated field.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [service.ChangeRenewalSettingsRequest, dict,])
def test_change_renewal_settings(request_type, transport: str = "grpc"):
    """ChangeRenewalSettings forwards the request and returns an LRO future."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API is mocked.
    request = request_type()
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.change_renewal_settings), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.change_renewal_settings(request)
    # The stub must have been invoked exactly once with the request proto.
    assert len(rpc.mock_calls) == 1
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == service.ChangeRenewalSettingsRequest()
    # The client wraps the raw Operation in a future.
    assert isinstance(response, future.Future)
def test_change_renewal_settings_empty_call():
    """Coverage failsafe: a call with request=None and no flattened fields
    must still reach the stub with a default request proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.change_renewal_settings), "__call__"
    ) as rpc:
        client.change_renewal_settings()
        rpc.assert_called()
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == service.ChangeRenewalSettingsRequest()
@pytest.mark.asyncio
async def test_change_renewal_settings_async(
    transport: str = "grpc_asyncio", request_type=service.ChangeRenewalSettingsRequest
):
    """Async variant: ChangeRenewalSettings returns an LRO future."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API is mocked.
    request = request_type()
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.change_renewal_settings), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.change_renewal_settings(request)
    # The stub was invoked with the expected request proto.
    assert len(rpc.mock_calls)
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == service.ChangeRenewalSettingsRequest()
    # The client wraps the raw Operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_change_renewal_settings_async_from_dict():
    # Re-run the async test with a plain dict request to cover dict coercion.
    await test_change_renewal_settings_async(request_type=dict)
def test_change_renewal_settings_field_headers():
    """Routing fields in the request must be sent as x-goog-request-params."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-routed field so a routing header is generated.
    request = service.ChangeRenewalSettingsRequest()
    request.name = "name/value"
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.change_renewal_settings), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.change_renewal_settings(request)
    # The stub was invoked exactly once with the unmodified request.
    assert len(rpc.mock_calls) == 1
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == request
    # The routing header must carry the populated field.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_change_renewal_settings_field_headers_async():
    """Async variant: routing fields must be sent as x-goog-request-params."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-routed field so a routing header is generated.
    request = service.ChangeRenewalSettingsRequest()
    request.name = "name/value"
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.change_renewal_settings), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.change_renewal_settings(request)
    # The stub was invoked with the unmodified request.
    assert len(rpc.mock_calls)
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == request
    # The routing header must carry the populated field.
    _, _, kw = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [service.ChangeOfferRequest, dict,])
def test_change_offer(request_type, transport: str = "grpc"):
    """ChangeOffer forwards the request and returns an LRO future."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API is mocked.
    request = request_type()
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.change_offer), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.change_offer(request)
    # The stub must have been invoked exactly once with the request proto.
    assert len(rpc.mock_calls) == 1
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == service.ChangeOfferRequest()
    # The client wraps the raw Operation in a future.
    assert isinstance(response, future.Future)
def test_change_offer_empty_call():
    """Coverage failsafe: a call with request=None and no flattened fields
    must still reach the stub with a default request proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.change_offer), "__call__") as rpc:
        client.change_offer()
        rpc.assert_called()
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == service.ChangeOfferRequest()
@pytest.mark.asyncio
async def test_change_offer_async(
    transport: str = "grpc_asyncio", request_type=service.ChangeOfferRequest
):
    """Async variant: ChangeOffer returns an LRO future."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API is mocked.
    request = request_type()
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.change_offer), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.change_offer(request)
    # The stub was invoked with the expected request proto.
    assert len(rpc.mock_calls)
    _, args, _ = rpc.mock_calls[0]
    assert args[0] == service.ChangeOfferRequest()
    # The client wraps the raw Operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_change_offer_async_from_dict():
    # Re-run the async test with a plain dict request to cover dict coercion.
    await test_change_offer_async(request_type=dict)
def test_change_offer_field_headers():
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.ChangeOfferRequest()
    request.name = "name/value"
    with mock.patch.object(type(client.transport.change_offer), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.change_offer(request)
    # Exactly one stub invocation, carrying the original request object.
    assert len(rpc.mock_calls) == 1
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_change_offer_field_headers_async():
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.ChangeOfferRequest()
    request.name = "name/value"
    with mock.patch.object(type(client.transport.change_offer), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.change_offer(request)
    # The stub was invoked, carrying the original request object.
    assert rpc.mock_calls
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.parametrize("request_type", [service.StartPaidServiceRequest, dict,])
def test_start_paid_service(request_type, transport: str = "grpc"):
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.start_paid_service), "__call__"
    ) as rpc:
        # Fake the RPC result: a long-running operation.
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.start_paid_service(request)
    # The stub was invoked exactly once with the default request message.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.StartPaidServiceRequest()
    # The client wraps the returned operation in a future.
    assert isinstance(response, future.Future)
def test_start_paid_service_empty_call():
    # Coverage failsafe: calling with request=None and no flattened fields
    # must still work and send an empty default request message.
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.start_paid_service), "__call__"
    ) as rpc:
        client.start_paid_service()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.StartPaidServiceRequest()
@pytest.mark.asyncio
async def test_start_paid_service_async(
    transport: str = "grpc_asyncio", request_type=service.StartPaidServiceRequest
):
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.start_paid_service), "__call__"
    ) as rpc:
        # Fake a successful unary-unary call resolving to a long-running op.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.start_paid_service(request)
    # The stub was invoked with the default request message.
    assert rpc.mock_calls
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.StartPaidServiceRequest()
    # The client wraps the returned operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_start_paid_service_async_from_dict():
    # Verify the async surface also accepts a plain dict in place of the
    # typed request message, by delegating to the typed async test.
    await test_start_paid_service_async(request_type=dict)
def test_start_paid_service_field_headers():
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.StartPaidServiceRequest()
    request.name = "name/value"
    with mock.patch.object(
        type(client.transport.start_paid_service), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.start_paid_service(request)
    # Exactly one stub invocation, carrying the original request object.
    assert len(rpc.mock_calls) == 1
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_start_paid_service_field_headers_async():
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.StartPaidServiceRequest()
    request.name = "name/value"
    with mock.patch.object(
        type(client.transport.start_paid_service), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.start_paid_service(request)
    # The stub was invoked, carrying the original request object.
    assert rpc.mock_calls
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.parametrize("request_type", [service.SuspendEntitlementRequest, dict,])
def test_suspend_entitlement(request_type, transport: str = "grpc"):
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.suspend_entitlement), "__call__"
    ) as rpc:
        # Fake the RPC result: a long-running operation.
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.suspend_entitlement(request)
    # The stub was invoked exactly once with the default request message.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.SuspendEntitlementRequest()
    # The client wraps the returned operation in a future.
    assert isinstance(response, future.Future)
def test_suspend_entitlement_empty_call():
    # Coverage failsafe: calling with request=None and no flattened fields
    # must still work and send an empty default request message.
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.suspend_entitlement), "__call__"
    ) as rpc:
        client.suspend_entitlement()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.SuspendEntitlementRequest()
@pytest.mark.asyncio
async def test_suspend_entitlement_async(
    transport: str = "grpc_asyncio", request_type=service.SuspendEntitlementRequest
):
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.suspend_entitlement), "__call__"
    ) as rpc:
        # Fake a successful unary-unary call resolving to a long-running op.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.suspend_entitlement(request)
    # The stub was invoked with the default request message.
    assert rpc.mock_calls
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.SuspendEntitlementRequest()
    # The client wraps the returned operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_suspend_entitlement_async_from_dict():
    # Verify the async surface also accepts a plain dict in place of the
    # typed request message, by delegating to the typed async test.
    await test_suspend_entitlement_async(request_type=dict)
def test_suspend_entitlement_field_headers():
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.SuspendEntitlementRequest()
    request.name = "name/value"
    with mock.patch.object(
        type(client.transport.suspend_entitlement), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.suspend_entitlement(request)
    # Exactly one stub invocation, carrying the original request object.
    assert len(rpc.mock_calls) == 1
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_suspend_entitlement_field_headers_async():
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.SuspendEntitlementRequest()
    request.name = "name/value"
    with mock.patch.object(
        type(client.transport.suspend_entitlement), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.suspend_entitlement(request)
    # The stub was invoked, carrying the original request object.
    assert rpc.mock_calls
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.parametrize("request_type", [service.CancelEntitlementRequest, dict,])
def test_cancel_entitlement(request_type, transport: str = "grpc"):
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.cancel_entitlement), "__call__"
    ) as rpc:
        # Fake the RPC result: a long-running operation.
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.cancel_entitlement(request)
    # The stub was invoked exactly once with the default request message.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.CancelEntitlementRequest()
    # The client wraps the returned operation in a future.
    assert isinstance(response, future.Future)
def test_cancel_entitlement_empty_call():
    # Coverage failsafe: calling with request=None and no flattened fields
    # must still work and send an empty default request message.
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.cancel_entitlement), "__call__"
    ) as rpc:
        client.cancel_entitlement()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.CancelEntitlementRequest()
@pytest.mark.asyncio
async def test_cancel_entitlement_async(
    transport: str = "grpc_asyncio", request_type=service.CancelEntitlementRequest
):
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.cancel_entitlement), "__call__"
    ) as rpc:
        # Fake a successful unary-unary call resolving to a long-running op.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.cancel_entitlement(request)
    # The stub was invoked with the default request message.
    assert rpc.mock_calls
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.CancelEntitlementRequest()
    # The client wraps the returned operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_cancel_entitlement_async_from_dict():
    # Verify the async surface also accepts a plain dict in place of the
    # typed request message, by delegating to the typed async test.
    await test_cancel_entitlement_async(request_type=dict)
def test_cancel_entitlement_field_headers():
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.CancelEntitlementRequest()
    request.name = "name/value"
    with mock.patch.object(
        type(client.transport.cancel_entitlement), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.cancel_entitlement(request)
    # Exactly one stub invocation, carrying the original request object.
    assert len(rpc.mock_calls) == 1
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_cancel_entitlement_field_headers_async():
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.CancelEntitlementRequest()
    request.name = "name/value"
    with mock.patch.object(
        type(client.transport.cancel_entitlement), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.cancel_entitlement(request)
    # The stub was invoked, carrying the original request object.
    assert rpc.mock_calls
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.parametrize("request_type", [service.ActivateEntitlementRequest, dict,])
def test_activate_entitlement(request_type, transport: str = "grpc"):
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.activate_entitlement), "__call__"
    ) as rpc:
        # Fake the RPC result: a long-running operation.
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.activate_entitlement(request)
    # The stub was invoked exactly once with the default request message.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.ActivateEntitlementRequest()
    # The client wraps the returned operation in a future.
    assert isinstance(response, future.Future)
def test_activate_entitlement_empty_call():
    # Coverage failsafe: calling with request=None and no flattened fields
    # must still work and send an empty default request message.
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.activate_entitlement), "__call__"
    ) as rpc:
        client.activate_entitlement()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ActivateEntitlementRequest()
@pytest.mark.asyncio
async def test_activate_entitlement_async(
    transport: str = "grpc_asyncio", request_type=service.ActivateEntitlementRequest
):
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.activate_entitlement), "__call__"
    ) as rpc:
        # Fake a successful unary-unary call resolving to a long-running op.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.activate_entitlement(request)
    # The stub was invoked with the default request message.
    assert rpc.mock_calls
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.ActivateEntitlementRequest()
    # The client wraps the returned operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_activate_entitlement_async_from_dict():
    # Verify the async surface also accepts a plain dict in place of the
    # typed request message, by delegating to the typed async test.
    await test_activate_entitlement_async(request_type=dict)
def test_activate_entitlement_field_headers():
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.ActivateEntitlementRequest()
    request.name = "name/value"
    with mock.patch.object(
        type(client.transport.activate_entitlement), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.activate_entitlement(request)
    # Exactly one stub invocation, carrying the original request object.
    assert len(rpc.mock_calls) == 1
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_activate_entitlement_field_headers_async():
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.ActivateEntitlementRequest()
    request.name = "name/value"
    with mock.patch.object(
        type(client.transport.activate_entitlement), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.activate_entitlement(request)
    # The stub was invoked, carrying the original request object.
    assert rpc.mock_calls
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.parametrize("request_type", [service.TransferEntitlementsRequest, dict,])
def test_transfer_entitlements(request_type, transport: str = "grpc"):
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.transfer_entitlements), "__call__"
    ) as rpc:
        # Fake the RPC result: a long-running operation.
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.transfer_entitlements(request)
    # The stub was invoked exactly once with the default request message.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.TransferEntitlementsRequest()
    # The client wraps the returned operation in a future.
    assert isinstance(response, future.Future)
def test_transfer_entitlements_empty_call():
    # Coverage failsafe: calling with request=None and no flattened fields
    # must still work and send an empty default request message.
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.transfer_entitlements), "__call__"
    ) as rpc:
        client.transfer_entitlements()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.TransferEntitlementsRequest()
@pytest.mark.asyncio
async def test_transfer_entitlements_async(
    transport: str = "grpc_asyncio", request_type=service.TransferEntitlementsRequest
):
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.transfer_entitlements), "__call__"
    ) as rpc:
        # Fake a successful unary-unary call resolving to a long-running op.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.transfer_entitlements(request)
    # The stub was invoked with the default request message.
    assert rpc.mock_calls
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.TransferEntitlementsRequest()
    # The client wraps the returned operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_transfer_entitlements_async_from_dict():
    # Verify the async surface also accepts a plain dict in place of the
    # typed request message, by delegating to the typed async test.
    await test_transfer_entitlements_async(request_type=dict)
def test_transfer_entitlements_field_headers():
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.TransferEntitlementsRequest()
    request.parent = "parent/value"
    with mock.patch.object(
        type(client.transport.transfer_entitlements), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.transfer_entitlements(request)
    # Exactly one stub invocation, carrying the original request object.
    assert len(rpc.mock_calls) == 1
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_transfer_entitlements_field_headers_async():
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.TransferEntitlementsRequest()
    request.parent = "parent/value"
    with mock.patch.object(
        type(client.transport.transfer_entitlements), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.transfer_entitlements(request)
    # The stub was invoked, carrying the original request object.
    assert rpc.mock_calls
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.parametrize(
    "request_type", [service.TransferEntitlementsToGoogleRequest, dict,]
)
def test_transfer_entitlements_to_google(request_type, transport: str = "grpc"):
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.transfer_entitlements_to_google), "__call__"
    ) as rpc:
        # Fake the RPC result: a long-running operation.
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.transfer_entitlements_to_google(request)
    # The stub was invoked exactly once with the default request message.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.TransferEntitlementsToGoogleRequest()
    # The client wraps the returned operation in a future.
    assert isinstance(response, future.Future)
def test_transfer_entitlements_to_google_empty_call():
    # Coverage failsafe: calling with request=None and no flattened fields
    # must still work and send an empty default request message.
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.transfer_entitlements_to_google), "__call__"
    ) as rpc:
        client.transfer_entitlements_to_google()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.TransferEntitlementsToGoogleRequest()
@pytest.mark.asyncio
async def test_transfer_entitlements_to_google_async(
    transport: str = "grpc_asyncio",
    request_type=service.TransferEntitlementsToGoogleRequest,
):
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.transfer_entitlements_to_google), "__call__"
    ) as rpc:
        # Fake a successful unary-unary call resolving to a long-running op.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.transfer_entitlements_to_google(request)
    # The stub was invoked with the default request message.
    assert rpc.mock_calls
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.TransferEntitlementsToGoogleRequest()
    # The client wraps the returned operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_transfer_entitlements_to_google_async_from_dict():
    # Verify the async surface also accepts a plain dict in place of the
    # typed request message, by delegating to the typed async test.
    await test_transfer_entitlements_to_google_async(request_type=dict)
def test_transfer_entitlements_to_google_field_headers():
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.TransferEntitlementsToGoogleRequest()
    request.parent = "parent/value"
    with mock.patch.object(
        type(client.transport.transfer_entitlements_to_google), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.transfer_entitlements_to_google(request)
    # Exactly one stub invocation, carrying the original request object.
    assert len(rpc.mock_calls) == 1
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_transfer_entitlements_to_google_field_headers_async():
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any field that is part of the HTTP/1.1 URI must be forwarded as a
    # routing header; give it a non-empty value.
    request = service.TransferEntitlementsToGoogleRequest()
    request.parent = "parent/value"
    with mock.patch.object(
        type(client.transport.transfer_entitlements_to_google), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.transfer_entitlements_to_google(request)
    # The stub was invoked, carrying the original request object.
    assert rpc.mock_calls
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must appear in the outgoing call metadata.
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.parametrize(
    "request_type", [service.ListChannelPartnerLinksRequest, dict,]
)
def test_list_channel_partner_links(request_type, transport: str = "grpc"):
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.list_channel_partner_links), "__call__"
    ) as rpc:
        # Fake the RPC result: a single response page with a continuation token.
        rpc.return_value = service.ListChannelPartnerLinksResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_channel_partner_links(request)
    # The stub was invoked exactly once with the default request message.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.ListChannelPartnerLinksRequest()
    # The client wraps the response in a pager that exposes the page token.
    assert isinstance(response, pagers.ListChannelPartnerLinksPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_channel_partner_links_empty_call():
    # Coverage failsafe: calling with request=None and no flattened fields
    # must still work and send an empty default request message.
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.list_channel_partner_links), "__call__"
    ) as rpc:
        client.list_channel_partner_links()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == service.ListChannelPartnerLinksRequest()
@pytest.mark.asyncio
async def test_list_channel_partner_links_async(
    transport: str = "grpc_asyncio", request_type=service.ListChannelPartnerLinksRequest
):
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked out, so an
    # empty request message is enough to exercise the surface.
    request = request_type()
    with mock.patch.object(
        type(client.transport.list_channel_partner_links), "__call__"
    ) as rpc:
        # Fake a successful unary-unary call resolving to one response page.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListChannelPartnerLinksResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_channel_partner_links(request)
    # The stub was invoked with the default request message.
    assert rpc.mock_calls
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == service.ListChannelPartnerLinksRequest()
    # The client wraps the response in an async pager exposing the page token.
    assert isinstance(response, pagers.ListChannelPartnerLinksAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_channel_partner_links_async_from_dict():
    """Re-run the async test with a plain dict request."""
    await test_list_channel_partner_links_async(request_type=dict)
def test_list_channel_partner_links_field_headers():
    """Request fields that are part of the URI must be routed via the
    x-goog-request-params metadata header."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.ListChannelPartnerLinksRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_channel_partner_links), "__call__"
    ) as call:
        call.return_value = service.ListChannelPartnerLinksResponse()
        client.list_channel_partner_links(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_channel_partner_links_field_headers_async():
    """Async variant of the field-header routing test."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.ListChannelPartnerLinksRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_channel_partner_links), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListChannelPartnerLinksResponse()
        )
        await client.list_channel_partner_links(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_channel_partner_links_pager(transport_name: str = "grpc"):
    """Iterating the sync pager walks every page and yields all items."""
    client = CloudChannelServiceClient(
        # Fix: pass a credentials *instance*; the bare class (missing "()")
        # was inconsistent with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_channel_partner_links), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[
                    channel_partner_links.ChannelPartnerLink(),
                    channel_partner_links.ChannelPartnerLink(),
                    channel_partner_links.ChannelPartnerLink(),
                ],
                next_page_token="abc",
            ),
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[], next_page_token="def",
            ),
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[channel_partner_links.ChannelPartnerLink(),],
                next_page_token="ghi",
            ),
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[
                    channel_partner_links.ChannelPartnerLink(),
                    channel_partner_links.ChannelPartnerLink(),
                ],
            ),
            RuntimeError,
        )

        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_channel_partner_links(request={})

        # The pager must carry the routing metadata for subsequent page fetches.
        assert pager._metadata == metadata

        # 3 + 0 + 1 + 2 items across the four pages above.
        results = list(pager)
        assert len(results) == 6
        assert all(
            isinstance(i, channel_partner_links.ChannelPartnerLink) for i in results
        )
def test_list_channel_partner_links_pages(transport_name: str = "grpc"):
    """The pager's ``pages`` iterator yields raw pages with their tokens."""
    client = CloudChannelServiceClient(
        # Fix: pass a credentials *instance*; the bare class (missing "()")
        # was inconsistent with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_channel_partner_links), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[
                    channel_partner_links.ChannelPartnerLink(),
                    channel_partner_links.ChannelPartnerLink(),
                    channel_partner_links.ChannelPartnerLink(),
                ],
                next_page_token="abc",
            ),
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[], next_page_token="def",
            ),
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[channel_partner_links.ChannelPartnerLink(),],
                next_page_token="ghi",
            ),
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[
                    channel_partner_links.ChannelPartnerLink(),
                    channel_partner_links.ChannelPartnerLink(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_channel_partner_links(request={}).pages)
        # The last page has no next_page_token, which serializes to "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_channel_partner_links_async_pager():
    """Async-iterating the pager walks every page and yields all items."""
    client = CloudChannelServiceAsyncClient(
        # Fix: pass a credentials *instance*; the bare class (missing "()")
        # was inconsistent with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_channel_partner_links),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[
                    channel_partner_links.ChannelPartnerLink(),
                    channel_partner_links.ChannelPartnerLink(),
                    channel_partner_links.ChannelPartnerLink(),
                ],
                next_page_token="abc",
            ),
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[], next_page_token="def",
            ),
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[channel_partner_links.ChannelPartnerLink(),],
                next_page_token="ghi",
            ),
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[
                    channel_partner_links.ChannelPartnerLink(),
                    channel_partner_links.ChannelPartnerLink(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_channel_partner_links(request={},)
        assert async_pager.next_page_token == "abc"

        # 3 + 0 + 1 + 2 items across the four pages above.
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(
            isinstance(i, channel_partner_links.ChannelPartnerLink) for i in responses
        )
@pytest.mark.asyncio
async def test_list_channel_partner_links_async_pages():
    """The async pager's ``pages`` iterator yields raw pages with their tokens."""
    client = CloudChannelServiceAsyncClient(
        # Fix: pass a credentials *instance*; the bare class (missing "()")
        # was inconsistent with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_channel_partner_links),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[
                    channel_partner_links.ChannelPartnerLink(),
                    channel_partner_links.ChannelPartnerLink(),
                    channel_partner_links.ChannelPartnerLink(),
                ],
                next_page_token="abc",
            ),
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[], next_page_token="def",
            ),
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[channel_partner_links.ChannelPartnerLink(),],
                next_page_token="ghi",
            ),
            service.ListChannelPartnerLinksResponse(
                channel_partner_links=[
                    channel_partner_links.ChannelPartnerLink(),
                    channel_partner_links.ChannelPartnerLink(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_channel_partner_links(request={})).pages:
            pages.append(page_)
        # The last page has no next_page_token, which serializes to "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [service.GetChannelPartnerLinkRequest, dict,])
def test_get_channel_partner_link(request_type, transport: str = "grpc"):
    """The RPC forwards the request to the stub and returns the link with all
    populated fields intact."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_channel_partner_link), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = channel_partner_links.ChannelPartnerLink(
            name="name_value",
            reseller_cloud_identity_id="reseller_cloud_identity_id_value",
            link_state=channel_partner_links.ChannelPartnerLinkState.INVITED,
            invite_link_uri="invite_link_uri_value",
            public_id="public_id_value",
        )
        response = client.get_channel_partner_link(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.GetChannelPartnerLinkRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, channel_partner_links.ChannelPartnerLink)
    assert response.name == "name_value"
    assert response.reseller_cloud_identity_id == "reseller_cloud_identity_id_value"
    assert response.link_state == channel_partner_links.ChannelPartnerLinkState.INVITED
    assert response.invite_link_uri == "invite_link_uri_value"
    assert response.public_id == "public_id_value"
def test_get_channel_partner_link_empty_call():
    """Calling with no arguments must still send a default request (coverage failsafe)."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_channel_partner_link), "__call__"
    ) as call:
        client.get_channel_partner_link()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.GetChannelPartnerLinkRequest()
@pytest.mark.asyncio
async def test_get_channel_partner_link_async(
    transport: str = "grpc_asyncio", request_type=service.GetChannelPartnerLinkRequest
):
    """Async variant: the RPC forwards the request and returns the populated link."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_channel_partner_link), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            channel_partner_links.ChannelPartnerLink(
                name="name_value",
                reseller_cloud_identity_id="reseller_cloud_identity_id_value",
                link_state=channel_partner_links.ChannelPartnerLinkState.INVITED,
                invite_link_uri="invite_link_uri_value",
                public_id="public_id_value",
            )
        )
        response = await client.get_channel_partner_link(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.GetChannelPartnerLinkRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, channel_partner_links.ChannelPartnerLink)
    assert response.name == "name_value"
    assert response.reseller_cloud_identity_id == "reseller_cloud_identity_id_value"
    assert response.link_state == channel_partner_links.ChannelPartnerLinkState.INVITED
    assert response.invite_link_uri == "invite_link_uri_value"
    assert response.public_id == "public_id_value"
@pytest.mark.asyncio
async def test_get_channel_partner_link_async_from_dict():
    """Re-run the async test with a plain dict request."""
    await test_get_channel_partner_link_async(request_type=dict)
def test_get_channel_partner_link_field_headers():
    """Request fields that are part of the URI must be routed via the
    x-goog-request-params metadata header."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.GetChannelPartnerLinkRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_channel_partner_link), "__call__"
    ) as call:
        call.return_value = channel_partner_links.ChannelPartnerLink()
        client.get_channel_partner_link(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_channel_partner_link_field_headers_async():
    """Async variant of the field-header routing test."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.GetChannelPartnerLinkRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_channel_partner_link), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            channel_partner_links.ChannelPartnerLink()
        )
        await client.get_channel_partner_link(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize(
    "request_type", [service.CreateChannelPartnerLinkRequest, dict,]
)
def test_create_channel_partner_link(request_type, transport: str = "grpc"):
    """The RPC forwards the request to the stub and returns the created link
    with all populated fields intact."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_channel_partner_link), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = channel_partner_links.ChannelPartnerLink(
            name="name_value",
            reseller_cloud_identity_id="reseller_cloud_identity_id_value",
            link_state=channel_partner_links.ChannelPartnerLinkState.INVITED,
            invite_link_uri="invite_link_uri_value",
            public_id="public_id_value",
        )
        response = client.create_channel_partner_link(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.CreateChannelPartnerLinkRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, channel_partner_links.ChannelPartnerLink)
    assert response.name == "name_value"
    assert response.reseller_cloud_identity_id == "reseller_cloud_identity_id_value"
    assert response.link_state == channel_partner_links.ChannelPartnerLinkState.INVITED
    assert response.invite_link_uri == "invite_link_uri_value"
    assert response.public_id == "public_id_value"
def test_create_channel_partner_link_empty_call():
    """Calling with no arguments must still send a default request (coverage failsafe)."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_channel_partner_link), "__call__"
    ) as call:
        client.create_channel_partner_link()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.CreateChannelPartnerLinkRequest()
@pytest.mark.asyncio
async def test_create_channel_partner_link_async(
    transport: str = "grpc_asyncio",
    request_type=service.CreateChannelPartnerLinkRequest,
):
    """Async variant: the RPC forwards the request and returns the created link."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_channel_partner_link), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            channel_partner_links.ChannelPartnerLink(
                name="name_value",
                reseller_cloud_identity_id="reseller_cloud_identity_id_value",
                link_state=channel_partner_links.ChannelPartnerLinkState.INVITED,
                invite_link_uri="invite_link_uri_value",
                public_id="public_id_value",
            )
        )
        response = await client.create_channel_partner_link(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.CreateChannelPartnerLinkRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, channel_partner_links.ChannelPartnerLink)
    assert response.name == "name_value"
    assert response.reseller_cloud_identity_id == "reseller_cloud_identity_id_value"
    assert response.link_state == channel_partner_links.ChannelPartnerLinkState.INVITED
    assert response.invite_link_uri == "invite_link_uri_value"
    assert response.public_id == "public_id_value"
@pytest.mark.asyncio
async def test_create_channel_partner_link_async_from_dict():
    """Re-run the async test with a plain dict request."""
    await test_create_channel_partner_link_async(request_type=dict)
def test_create_channel_partner_link_field_headers():
    """Request fields that are part of the URI must be routed via the
    x-goog-request-params metadata header."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.CreateChannelPartnerLinkRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_channel_partner_link), "__call__"
    ) as call:
        call.return_value = channel_partner_links.ChannelPartnerLink()
        client.create_channel_partner_link(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_channel_partner_link_field_headers_async():
    """Async variant of the field-header routing test."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.CreateChannelPartnerLinkRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_channel_partner_link), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            channel_partner_links.ChannelPartnerLink()
        )
        await client.create_channel_partner_link(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize(
    "request_type", [service.UpdateChannelPartnerLinkRequest, dict,]
)
def test_update_channel_partner_link(request_type, transport: str = "grpc"):
    """The RPC forwards the request to the stub and returns the updated link
    with all populated fields intact."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_channel_partner_link), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = channel_partner_links.ChannelPartnerLink(
            name="name_value",
            reseller_cloud_identity_id="reseller_cloud_identity_id_value",
            link_state=channel_partner_links.ChannelPartnerLinkState.INVITED,
            invite_link_uri="invite_link_uri_value",
            public_id="public_id_value",
        )
        response = client.update_channel_partner_link(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.UpdateChannelPartnerLinkRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, channel_partner_links.ChannelPartnerLink)
    assert response.name == "name_value"
    assert response.reseller_cloud_identity_id == "reseller_cloud_identity_id_value"
    assert response.link_state == channel_partner_links.ChannelPartnerLinkState.INVITED
    assert response.invite_link_uri == "invite_link_uri_value"
    assert response.public_id == "public_id_value"
def test_update_channel_partner_link_empty_call():
    """Calling with no arguments must still send a default request (coverage failsafe)."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_channel_partner_link), "__call__"
    ) as call:
        client.update_channel_partner_link()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.UpdateChannelPartnerLinkRequest()
@pytest.mark.asyncio
async def test_update_channel_partner_link_async(
    transport: str = "grpc_asyncio",
    request_type=service.UpdateChannelPartnerLinkRequest,
):
    """Async variant: the RPC forwards the request and returns the updated link."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_channel_partner_link), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            channel_partner_links.ChannelPartnerLink(
                name="name_value",
                reseller_cloud_identity_id="reseller_cloud_identity_id_value",
                link_state=channel_partner_links.ChannelPartnerLinkState.INVITED,
                invite_link_uri="invite_link_uri_value",
                public_id="public_id_value",
            )
        )
        response = await client.update_channel_partner_link(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.UpdateChannelPartnerLinkRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, channel_partner_links.ChannelPartnerLink)
    assert response.name == "name_value"
    assert response.reseller_cloud_identity_id == "reseller_cloud_identity_id_value"
    assert response.link_state == channel_partner_links.ChannelPartnerLinkState.INVITED
    assert response.invite_link_uri == "invite_link_uri_value"
    assert response.public_id == "public_id_value"
@pytest.mark.asyncio
async def test_update_channel_partner_link_async_from_dict():
    """Re-run the async test with a plain dict request."""
    await test_update_channel_partner_link_async(request_type=dict)
def test_update_channel_partner_link_field_headers():
    """Request fields that are part of the URI must be routed via the
    x-goog-request-params metadata header."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.UpdateChannelPartnerLinkRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_channel_partner_link), "__call__"
    ) as call:
        call.return_value = channel_partner_links.ChannelPartnerLink()
        client.update_channel_partner_link(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_channel_partner_link_field_headers_async():
    """Async variant of the field-header routing test."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.UpdateChannelPartnerLinkRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_channel_partner_link), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            channel_partner_links.ChannelPartnerLink()
        )
        await client.update_channel_partner_link(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [service.LookupOfferRequest, dict,])
def test_lookup_offer(request_type, transport: str = "grpc"):
    """The RPC forwards the request to the stub and returns the offer."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.lookup_offer), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = offers.Offer(name="name_value",)
        response = client.lookup_offer(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.LookupOfferRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, offers.Offer)
    assert response.name == "name_value"
def test_lookup_offer_empty_call():
    """Calling with no arguments must still send a default request (coverage failsafe)."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.lookup_offer), "__call__") as call:
        client.lookup_offer()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.LookupOfferRequest()
@pytest.mark.asyncio
async def test_lookup_offer_async(
    transport: str = "grpc_asyncio", request_type=service.LookupOfferRequest
):
    """Async variant: the RPC forwards the request and returns the offer."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.lookup_offer), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            offers.Offer(name="name_value",)
        )
        response = await client.lookup_offer(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.LookupOfferRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, offers.Offer)
    assert response.name == "name_value"
@pytest.mark.asyncio
async def test_lookup_offer_async_from_dict():
    """Re-run the async test with a plain dict request."""
    await test_lookup_offer_async(request_type=dict)
def test_lookup_offer_field_headers():
    """Request fields that are part of the URI must be routed via the
    x-goog-request-params metadata header."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.LookupOfferRequest()
    request.entitlement = "entitlement/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.lookup_offer), "__call__") as call:
        call.return_value = offers.Offer()
        client.lookup_offer(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "entitlement=entitlement/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_lookup_offer_field_headers_async():
    """Async variant of the field-header routing test."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.LookupOfferRequest()
    request.entitlement = "entitlement/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.lookup_offer), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(offers.Offer())
        await client.lookup_offer(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "entitlement=entitlement/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [service.ListProductsRequest, dict,])
def test_list_products(request_type, transport: str = "grpc"):
    """The RPC forwards the request to the stub and wraps the response in a
    sync pager exposing the next page token."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_products), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = service.ListProductsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_products(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == service.ListProductsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListProductsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_products_empty_call():
    """A bare list_products() call still sends a default request proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.list_products), "__call__") as stub:
        client.list_products()
        stub.assert_called()
        # With no arguments, an empty ListProductsRequest goes on the wire.
        _, args, _ = stub.mock_calls[0]
        assert args[0] == service.ListProductsRequest()
@pytest.mark.asyncio
async def test_list_products_async(
    transport: str = "grpc_asyncio", request_type=service.ListProductsRequest
):
    """Async variant: list_products forwards the request and returns an async pager."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices; the wire layer is mocked out.
    req = request_type()
    with mock.patch.object(type(client.transport.list_products), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListProductsResponse(next_page_token="next_page_token_value",)
        )
        resp = await client.list_products(req)
        # The stub fired, carrying the expected request proto.
        assert len(stub.mock_calls)
        _, args, _ = stub.mock_calls[0]
        assert args[0] == service.ListProductsRequest()
    # The reply is surfaced through the async paging helper.
    assert isinstance(resp, pagers.ListProductsAsyncPager)
    assert resp.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_products_async_from_dict():
    """A plain dict is accepted wherever a request proto is expected."""
    await test_list_products_async(request_type=dict)
def test_list_products_pager(transport_name: str = "grpc"):
    """list_products returns a pager that transparently walks all pages.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_products), "__call__") as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListProductsResponse(
                products=[products.Product(), products.Product(), products.Product(),],
                next_page_token="abc",
            ),
            service.ListProductsResponse(products=[], next_page_token="def",),
            service.ListProductsResponse(
                products=[products.Product(),], next_page_token="ghi",
            ),
            service.ListProductsResponse(
                products=[products.Product(), products.Product(),],
            ),
            RuntimeError,
        )

        # ListProductsRequest has no URI routing field, so no routing metadata.
        metadata = ()
        pager = client.list_products(request={})
        assert pager._metadata == metadata

        # Iterating the pager flattens all pages into individual products.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, products.Product) for i in results)
def test_list_products_pages(transport_name: str = "grpc"):
    """The pager's ``pages`` attribute yields the raw per-page responses.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_products), "__call__") as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListProductsResponse(
                products=[products.Product(), products.Product(), products.Product(),],
                next_page_token="abc",
            ),
            service.ListProductsResponse(products=[], next_page_token="def",),
            service.ListProductsResponse(
                products=[products.Product(),], next_page_token="ghi",
            ),
            service.ListProductsResponse(
                products=[products.Product(), products.Product(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_products(request={}).pages)
        # Each page exposes its raw response, including the page token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_products_async_pager():
    """The async pager flattens all pages when iterated with ``async for``.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file.
    """
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_products), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListProductsResponse(
                products=[products.Product(), products.Product(), products.Product(),],
                next_page_token="abc",
            ),
            service.ListProductsResponse(products=[], next_page_token="def",),
            service.ListProductsResponse(
                products=[products.Product(),], next_page_token="ghi",
            ),
            service.ListProductsResponse(
                products=[products.Product(), products.Product(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_products(request={},)
        assert async_pager.next_page_token == "abc"
        # Async iteration flattens all pages into individual products.
        responses = [response async for response in async_pager]
        assert len(responses) == 6
        assert all(isinstance(i, products.Product) for i in responses)
@pytest.mark.asyncio
async def test_list_products_async_pages():
    """The async pager's ``pages`` yields the raw per-page responses.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file.
    """
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_products), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListProductsResponse(
                products=[products.Product(), products.Product(), products.Product(),],
                next_page_token="abc",
            ),
            service.ListProductsResponse(products=[], next_page_token="def",),
            service.ListProductsResponse(
                products=[products.Product(),], next_page_token="ghi",
            ),
            service.ListProductsResponse(
                products=[products.Product(), products.Product(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_products(request={})).pages:
            pages.append(page_)
        # Each page exposes its raw response, including the page token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [service.ListSkusRequest, dict,])
def test_list_skus(request_type, transport: str = "grpc"):
    """list_skus forwards the request and wraps the reply in a pager."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API layer is mocked out,
    # so an empty request object is sufficient.
    req = request_type()
    with mock.patch.object(type(client.transport.list_skus), "__call__") as stub:
        # Canned response for the fake stub.
        stub.return_value = service.ListSkusResponse(
            next_page_token="next_page_token_value",
        )
        resp = client.list_skus(req)
        # Exactly one underlying RPC, carrying the expected request proto.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == service.ListSkusRequest()
    # The reply is surfaced through the paging helper.
    assert isinstance(resp, pagers.ListSkusPager)
    assert resp.next_page_token == "next_page_token_value"
def test_list_skus_empty_call():
    """A bare list_skus() call still sends a default request proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.list_skus), "__call__") as stub:
        client.list_skus()
        stub.assert_called()
        # With no arguments, an empty ListSkusRequest goes on the wire.
        _, args, _ = stub.mock_calls[0]
        assert args[0] == service.ListSkusRequest()
@pytest.mark.asyncio
async def test_list_skus_async(
    transport: str = "grpc_asyncio", request_type=service.ListSkusRequest
):
    """Async variant: list_skus forwards the request and returns an async pager."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices; the wire layer is mocked out.
    req = request_type()
    with mock.patch.object(type(client.transport.list_skus), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListSkusResponse(next_page_token="next_page_token_value",)
        )
        resp = await client.list_skus(req)
        # The stub fired, carrying the expected request proto.
        assert len(stub.mock_calls)
        _, args, _ = stub.mock_calls[0]
        assert args[0] == service.ListSkusRequest()
    # The reply is surfaced through the async paging helper.
    assert isinstance(resp, pagers.ListSkusAsyncPager)
    assert resp.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_skus_async_from_dict():
    """A plain dict is accepted wherever a request proto is expected."""
    await test_list_skus_async(request_type=dict)
def test_list_skus_field_headers():
    """The request's parent must be sent as an x-goog-request-params header."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is part of the HTTP/1.1 URI.
    req = service.ListSkusRequest()
    req.parent = "parent/value"
    with mock.patch.object(type(client.transport.list_skus), "__call__") as stub:
        stub.return_value = service.ListSkusResponse()
        client.list_skus(req)
        # Exactly one underlying RPC, carrying exactly our request.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == req
    # The routing header must appear in the outgoing call metadata.
    _, _, kw = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_skus_field_headers_async():
    """Async variant: the parent field is sent as a routing header."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is part of the HTTP/1.1 URI.
    req = service.ListSkusRequest()
    req.parent = "parent/value"
    with mock.patch.object(type(client.transport.list_skus), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListSkusResponse()
        )
        await client.list_skus(req)
        # The stub fired, carrying exactly our request.
        assert len(stub.mock_calls)
        _, args, _ = stub.mock_calls[0]
        assert args[0] == req
    # The routing header must appear in the outgoing call metadata.
    _, _, kw = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_skus_pager(transport_name: str = "grpc"):
    """list_skus returns a pager that flattens all pages of SKUs.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file; also
    collapsed the needless ``metadata = ()`` + concatenation into one tuple.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_skus), "__call__") as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListSkusResponse(
                skus=[products.Sku(), products.Sku(), products.Sku(),],
                next_page_token="abc",
            ),
            service.ListSkusResponse(skus=[], next_page_token="def",),
            service.ListSkusResponse(skus=[products.Sku(),], next_page_token="ghi",),
            service.ListSkusResponse(skus=[products.Sku(), products.Sku(),],),
            RuntimeError,
        )

        # The empty "parent" routing header is still attached to the call.
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_skus(request={})
        assert pager._metadata == metadata

        # Iterating the pager flattens all pages into individual SKUs.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, products.Sku) for i in results)
def test_list_skus_pages(transport_name: str = "grpc"):
    """The pager's ``pages`` attribute yields the raw per-page responses.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_skus), "__call__") as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListSkusResponse(
                skus=[products.Sku(), products.Sku(), products.Sku(),],
                next_page_token="abc",
            ),
            service.ListSkusResponse(skus=[], next_page_token="def",),
            service.ListSkusResponse(skus=[products.Sku(),], next_page_token="ghi",),
            service.ListSkusResponse(skus=[products.Sku(), products.Sku(),],),
            RuntimeError,
        )
        pages = list(client.list_skus(request={}).pages)
        # Each page exposes its raw response, including the page token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_skus_async_pager():
    """The async pager flattens all SKU pages under ``async for``.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file.
    """
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_skus), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListSkusResponse(
                skus=[products.Sku(), products.Sku(), products.Sku(),],
                next_page_token="abc",
            ),
            service.ListSkusResponse(skus=[], next_page_token="def",),
            service.ListSkusResponse(skus=[products.Sku(),], next_page_token="ghi",),
            service.ListSkusResponse(skus=[products.Sku(), products.Sku(),],),
            RuntimeError,
        )
        async_pager = await client.list_skus(request={},)
        assert async_pager.next_page_token == "abc"
        # Async iteration flattens all pages into individual SKUs.
        responses = [response async for response in async_pager]
        assert len(responses) == 6
        assert all(isinstance(i, products.Sku) for i in responses)
@pytest.mark.asyncio
async def test_list_skus_async_pages():
    """The async pager's ``pages`` yields the raw per-page responses.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file.
    """
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_skus), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListSkusResponse(
                skus=[products.Sku(), products.Sku(), products.Sku(),],
                next_page_token="abc",
            ),
            service.ListSkusResponse(skus=[], next_page_token="def",),
            service.ListSkusResponse(skus=[products.Sku(),], next_page_token="ghi",),
            service.ListSkusResponse(skus=[products.Sku(), products.Sku(),],),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_skus(request={})).pages:
            pages.append(page_)
        # Each page exposes its raw response, including the page token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [service.ListOffersRequest, dict,])
def test_list_offers(request_type, transport: str = "grpc"):
    """list_offers forwards the request and wraps the reply in a pager."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API layer is mocked out,
    # so an empty request object is sufficient.
    req = request_type()
    with mock.patch.object(type(client.transport.list_offers), "__call__") as stub:
        # Canned response for the fake stub.
        stub.return_value = service.ListOffersResponse(
            next_page_token="next_page_token_value",
        )
        resp = client.list_offers(req)
        # Exactly one underlying RPC, carrying the expected request proto.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == service.ListOffersRequest()
    # The reply is surfaced through the paging helper.
    assert isinstance(resp, pagers.ListOffersPager)
    assert resp.next_page_token == "next_page_token_value"
def test_list_offers_empty_call():
    """A bare list_offers() call still sends a default request proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.list_offers), "__call__") as stub:
        client.list_offers()
        stub.assert_called()
        # With no arguments, an empty ListOffersRequest goes on the wire.
        _, args, _ = stub.mock_calls[0]
        assert args[0] == service.ListOffersRequest()
@pytest.mark.asyncio
async def test_list_offers_async(
    transport: str = "grpc_asyncio", request_type=service.ListOffersRequest
):
    """Async variant: list_offers forwards the request and returns an async pager."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices; the wire layer is mocked out.
    req = request_type()
    with mock.patch.object(type(client.transport.list_offers), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListOffersResponse(next_page_token="next_page_token_value",)
        )
        resp = await client.list_offers(req)
        # The stub fired, carrying the expected request proto.
        assert len(stub.mock_calls)
        _, args, _ = stub.mock_calls[0]
        assert args[0] == service.ListOffersRequest()
    # The reply is surfaced through the async paging helper.
    assert isinstance(resp, pagers.ListOffersAsyncPager)
    assert resp.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_offers_async_from_dict():
    """A plain dict is accepted wherever a request proto is expected."""
    await test_list_offers_async(request_type=dict)
def test_list_offers_field_headers():
    """The request's parent must be sent as an x-goog-request-params header."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is part of the HTTP/1.1 URI.
    req = service.ListOffersRequest()
    req.parent = "parent/value"
    with mock.patch.object(type(client.transport.list_offers), "__call__") as stub:
        stub.return_value = service.ListOffersResponse()
        client.list_offers(req)
        # Exactly one underlying RPC, carrying exactly our request.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == req
    # The routing header must appear in the outgoing call metadata.
    _, _, kw = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_offers_field_headers_async():
    """Async variant: the parent field is sent as a routing header."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is part of the HTTP/1.1 URI.
    req = service.ListOffersRequest()
    req.parent = "parent/value"
    with mock.patch.object(type(client.transport.list_offers), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListOffersResponse()
        )
        await client.list_offers(req)
        # The stub fired, carrying exactly our request.
        assert len(stub.mock_calls)
        _, args, _ = stub.mock_calls[0]
        assert args[0] == req
    # The routing header must appear in the outgoing call metadata.
    _, _, kw = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_offers_pager(transport_name: str = "grpc"):
    """list_offers returns a pager that flattens all pages of offers.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file; also
    collapsed the needless ``metadata = ()`` + concatenation into one tuple.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_offers), "__call__") as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListOffersResponse(
                offers=[offers.Offer(), offers.Offer(), offers.Offer(),],
                next_page_token="abc",
            ),
            service.ListOffersResponse(offers=[], next_page_token="def",),
            service.ListOffersResponse(
                offers=[offers.Offer(),], next_page_token="ghi",
            ),
            service.ListOffersResponse(offers=[offers.Offer(), offers.Offer(),],),
            RuntimeError,
        )

        # The empty "parent" routing header is still attached to the call.
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_offers(request={})
        assert pager._metadata == metadata

        # Iterating the pager flattens all pages into individual offers.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, offers.Offer) for i in results)
def test_list_offers_pages(transport_name: str = "grpc"):
    """The pager's ``pages`` attribute yields the raw per-page responses.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_offers), "__call__") as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListOffersResponse(
                offers=[offers.Offer(), offers.Offer(), offers.Offer(),],
                next_page_token="abc",
            ),
            service.ListOffersResponse(offers=[], next_page_token="def",),
            service.ListOffersResponse(
                offers=[offers.Offer(),], next_page_token="ghi",
            ),
            service.ListOffersResponse(offers=[offers.Offer(), offers.Offer(),],),
            RuntimeError,
        )
        pages = list(client.list_offers(request={}).pages)
        # Each page exposes its raw response, including the page token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_offers_async_pager():
    """The async pager flattens all offer pages under ``async for``.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file.
    """
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_offers), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListOffersResponse(
                offers=[offers.Offer(), offers.Offer(), offers.Offer(),],
                next_page_token="abc",
            ),
            service.ListOffersResponse(offers=[], next_page_token="def",),
            service.ListOffersResponse(
                offers=[offers.Offer(),], next_page_token="ghi",
            ),
            service.ListOffersResponse(offers=[offers.Offer(), offers.Offer(),],),
            RuntimeError,
        )
        async_pager = await client.list_offers(request={},)
        assert async_pager.next_page_token == "abc"
        # Async iteration flattens all pages into individual offers.
        responses = [response async for response in async_pager]
        assert len(responses) == 6
        assert all(isinstance(i, offers.Offer) for i in responses)
@pytest.mark.asyncio
async def test_list_offers_async_pages():
    """The async pager's ``pages`` yields the raw per-page responses.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file.
    """
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_offers), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListOffersResponse(
                offers=[offers.Offer(), offers.Offer(), offers.Offer(),],
                next_page_token="abc",
            ),
            service.ListOffersResponse(offers=[], next_page_token="def",),
            service.ListOffersResponse(
                offers=[offers.Offer(),], next_page_token="ghi",
            ),
            service.ListOffersResponse(offers=[offers.Offer(), offers.Offer(),],),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_offers(request={})).pages:
            pages.append(page_)
        # Each page exposes its raw response, including the page token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [service.ListPurchasableSkusRequest, dict,])
def test_list_purchasable_skus(request_type, transport: str = "grpc"):
    """list_purchasable_skus forwards the request and returns a pager."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API layer is mocked out,
    # so an empty request object is sufficient.
    req = request_type()
    with mock.patch.object(
        type(client.transport.list_purchasable_skus), "__call__"
    ) as stub:
        # Canned response for the fake stub.
        stub.return_value = service.ListPurchasableSkusResponse(
            next_page_token="next_page_token_value",
        )
        resp = client.list_purchasable_skus(req)
        # Exactly one underlying RPC, carrying the expected request proto.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == service.ListPurchasableSkusRequest()
    # The reply is surfaced through the paging helper.
    assert isinstance(resp, pagers.ListPurchasableSkusPager)
    assert resp.next_page_token == "next_page_token_value"
def test_list_purchasable_skus_empty_call():
    """A bare list_purchasable_skus() call still sends a default request."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(
        type(client.transport.list_purchasable_skus), "__call__"
    ) as stub:
        client.list_purchasable_skus()
        stub.assert_called()
        # With no arguments, an empty ListPurchasableSkusRequest goes out.
        _, args, _ = stub.mock_calls[0]
        assert args[0] == service.ListPurchasableSkusRequest()
@pytest.mark.asyncio
async def test_list_purchasable_skus_async(
    transport: str = "grpc_asyncio", request_type=service.ListPurchasableSkusRequest
):
    """Async variant: the request is forwarded and an async pager returned."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices; the wire layer is mocked out.
    req = request_type()
    with mock.patch.object(
        type(client.transport.list_purchasable_skus), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListPurchasableSkusResponse(
                next_page_token="next_page_token_value",
            )
        )
        resp = await client.list_purchasable_skus(req)
        # The stub fired, carrying the expected request proto.
        assert len(stub.mock_calls)
        _, args, _ = stub.mock_calls[0]
        assert args[0] == service.ListPurchasableSkusRequest()
    # The reply is surfaced through the async paging helper.
    assert isinstance(resp, pagers.ListPurchasableSkusAsyncPager)
    assert resp.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_purchasable_skus_async_from_dict():
    """A plain dict is accepted wherever a request proto is expected."""
    await test_list_purchasable_skus_async(request_type=dict)
def test_list_purchasable_skus_field_headers():
    """The request's customer must be sent as an x-goog-request-params header."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is part of the HTTP/1.1 URI.
    req = service.ListPurchasableSkusRequest()
    req.customer = "customer/value"
    with mock.patch.object(
        type(client.transport.list_purchasable_skus), "__call__"
    ) as stub:
        stub.return_value = service.ListPurchasableSkusResponse()
        client.list_purchasable_skus(req)
        # Exactly one underlying RPC, carrying exactly our request.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == req
    # The routing header must appear in the outgoing call metadata.
    _, _, kw = stub.mock_calls[0]
    assert ("x-goog-request-params", "customer=customer/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_purchasable_skus_field_headers_async():
    """Async variant: the customer field is sent as a routing header."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is part of the HTTP/1.1 URI.
    req = service.ListPurchasableSkusRequest()
    req.customer = "customer/value"
    with mock.patch.object(
        type(client.transport.list_purchasable_skus), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListPurchasableSkusResponse()
        )
        await client.list_purchasable_skus(req)
        # The stub fired, carrying exactly our request.
        assert len(stub.mock_calls)
        _, args, _ = stub.mock_calls[0]
        assert args[0] == req
    # The routing header must appear in the outgoing call metadata.
    _, _, kw = stub.mock_calls[0]
    assert ("x-goog-request-params", "customer=customer/value",) in kw["metadata"]
def test_list_purchasable_skus_pager(transport_name: str = "grpc"):
    """list_purchasable_skus returns a pager that flattens all pages.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file; also
    collapsed the needless ``metadata = ()`` + concatenation into one tuple.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_purchasable_skus), "__call__"
    ) as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListPurchasableSkusResponse(
                purchasable_skus=[
                    service.PurchasableSku(),
                    service.PurchasableSku(),
                    service.PurchasableSku(),
                ],
                next_page_token="abc",
            ),
            service.ListPurchasableSkusResponse(
                purchasable_skus=[], next_page_token="def",
            ),
            service.ListPurchasableSkusResponse(
                purchasable_skus=[service.PurchasableSku(),], next_page_token="ghi",
            ),
            service.ListPurchasableSkusResponse(
                purchasable_skus=[service.PurchasableSku(), service.PurchasableSku(),],
            ),
            RuntimeError,
        )

        # The empty "customer" routing header is still attached to the call.
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("customer", ""),)),
        )
        pager = client.list_purchasable_skus(request={})
        assert pager._metadata == metadata

        # Iterating the pager flattens all pages into individual SKUs.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, service.PurchasableSku) for i in results)
def test_list_purchasable_skus_pages(transport_name: str = "grpc"):
    """The pager's ``pages`` attribute yields the raw per-page responses.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file.
    """
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_purchasable_skus), "__call__"
    ) as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListPurchasableSkusResponse(
                purchasable_skus=[
                    service.PurchasableSku(),
                    service.PurchasableSku(),
                    service.PurchasableSku(),
                ],
                next_page_token="abc",
            ),
            service.ListPurchasableSkusResponse(
                purchasable_skus=[], next_page_token="def",
            ),
            service.ListPurchasableSkusResponse(
                purchasable_skus=[service.PurchasableSku(),], next_page_token="ghi",
            ),
            service.ListPurchasableSkusResponse(
                purchasable_skus=[service.PurchasableSku(), service.PurchasableSku(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_purchasable_skus(request={}).pages)
        # Each page exposes its raw response, including the page token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_purchasable_skus_async_pager():
    """The async pager flattens all pages under ``async for``.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file.
    """
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_purchasable_skus),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListPurchasableSkusResponse(
                purchasable_skus=[
                    service.PurchasableSku(),
                    service.PurchasableSku(),
                    service.PurchasableSku(),
                ],
                next_page_token="abc",
            ),
            service.ListPurchasableSkusResponse(
                purchasable_skus=[], next_page_token="def",
            ),
            service.ListPurchasableSkusResponse(
                purchasable_skus=[service.PurchasableSku(),], next_page_token="ghi",
            ),
            service.ListPurchasableSkusResponse(
                purchasable_skus=[service.PurchasableSku(), service.PurchasableSku(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_purchasable_skus(request={},)
        assert async_pager.next_page_token == "abc"
        # Async iteration flattens all pages into individual SKUs.
        responses = [response async for response in async_pager]
        assert len(responses) == 6
        assert all(isinstance(i, service.PurchasableSku) for i in responses)
@pytest.mark.asyncio
async def test_list_purchasable_skus_async_pages():
    """The async pager's ``pages`` yields the raw per-page responses.

    Fixed: credentials previously passed the ``AnonymousCredentials`` class
    instead of an instance, unlike every other test in this file.
    """
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_purchasable_skus),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Four pages (3, 0, 1, 2 items), then exhaustion.
        call.side_effect = (
            service.ListPurchasableSkusResponse(
                purchasable_skus=[
                    service.PurchasableSku(),
                    service.PurchasableSku(),
                    service.PurchasableSku(),
                ],
                next_page_token="abc",
            ),
            service.ListPurchasableSkusResponse(
                purchasable_skus=[], next_page_token="def",
            ),
            service.ListPurchasableSkusResponse(
                purchasable_skus=[service.PurchasableSku(),], next_page_token="ghi",
            ),
            service.ListPurchasableSkusResponse(
                purchasable_skus=[service.PurchasableSku(), service.PurchasableSku(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_purchasable_skus(request={})).pages:
            pages.append(page_)
        # Each page exposes its raw response, including the page token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [service.ListPurchasableOffersRequest, dict,])
def test_list_purchasable_offers(request_type, transport: str = "grpc"):
    """list_purchasable_offers forwards the request and returns a pager."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API layer is mocked out,
    # so an empty request object is sufficient.
    req = request_type()
    with mock.patch.object(
        type(client.transport.list_purchasable_offers), "__call__"
    ) as stub:
        # Canned response for the fake stub.
        stub.return_value = service.ListPurchasableOffersResponse(
            next_page_token="next_page_token_value",
        )
        resp = client.list_purchasable_offers(req)
        # Exactly one underlying RPC, carrying the expected request proto.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == service.ListPurchasableOffersRequest()
    # The reply is surfaced through the paging helper.
    assert isinstance(resp, pagers.ListPurchasableOffersPager)
    assert resp.next_page_token == "next_page_token_value"
def test_list_purchasable_offers_empty_call():
    """Coverage failsafe: a call with request=None and no flattened fields
    must still reach the stub with a default request."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )

    with mock.patch.object(
        type(client.transport.list_purchasable_offers), "__call__"
    ) as call:
        client.list_purchasable_offers()
        call.assert_called()
        # The client synthesizes an empty default request.
        assert call.mock_calls[0].args[0] == service.ListPurchasableOffersRequest()
@pytest.mark.asyncio
async def test_list_purchasable_offers_async(
    transport: str = "grpc_asyncio", request_type=service.ListPurchasableOffersRequest
):
    """Async variant: the call resolves to an async pager."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    req = request_type()  # proto3: all fields optional; the API is mocked

    with mock.patch.object(
        type(client.transport.list_purchasable_offers), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListPurchasableOffersResponse(
                next_page_token="next_page_token_value"
            )
        )
        resp = await client.list_purchasable_offers(req)

    # The stub must have been invoked with the proto request.
    assert len(call.mock_calls)
    assert call.mock_calls[0].args[0] == service.ListPurchasableOffersRequest()

    assert isinstance(resp, pagers.ListPurchasableOffersAsyncPager)
    assert resp.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_purchasable_offers_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_list_purchasable_offers_async(request_type=dict)
def test_list_purchasable_offers_field_headers():
    """URI-bound request fields must be forwarded as routing metadata."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the routed field with a non-empty value.
    req = service.ListPurchasableOffersRequest()
    req.customer = "customer/value"

    with mock.patch.object(
        type(client.transport.list_purchasable_offers), "__call__"
    ) as call:
        call.return_value = service.ListPurchasableOffersResponse()
        client.list_purchasable_offers(req)

    # Exactly one stub invocation, with the request passed through.
    assert len(call.mock_calls) == 1
    assert call.mock_calls[0].args[0] == req

    # The x-goog-request-params header carries the routed field.
    kw = call.mock_calls[0].kwargs
    assert ("x-goog-request-params", "customer=customer/value") in kw["metadata"]
@pytest.mark.asyncio
async def test_list_purchasable_offers_field_headers_async():
    """Async variant: URI-bound fields are forwarded as routing metadata."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the routed field with a non-empty value.
    req = service.ListPurchasableOffersRequest()
    req.customer = "customer/value"

    with mock.patch.object(
        type(client.transport.list_purchasable_offers), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListPurchasableOffersResponse()
        )
        await client.list_purchasable_offers(req)

    # Stub invoked with the request passed through.
    assert len(call.mock_calls)
    assert call.mock_calls[0].args[0] == req

    # The x-goog-request-params header carries the routed field.
    kw = call.mock_calls[0].kwargs
    assert ("x-goog-request-params", "customer=customer/value") in kw["metadata"]
def test_list_purchasable_offers_pager(transport_name: str = "grpc"):
    """Iterating the sync pager yields every offer across all pages."""
    client = CloudChannelServiceClient(
        # Fix: pass a credentials *instance* — the class object was being
        # passed (missing parentheses), inconsistent with the rest of the suite.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_purchasable_offers), "__call__"
    ) as call:
        # Pages of 3, 0, 1 and 2 offers; the trailing RuntimeError guards
        # against the pager requesting a page beyond the last one.
        call.side_effect = (
            service.ListPurchasableOffersResponse(
                purchasable_offers=[
                    service.PurchasableOffer(),
                    service.PurchasableOffer(),
                    service.PurchasableOffer(),
                ],
                next_page_token="abc",
            ),
            service.ListPurchasableOffersResponse(
                purchasable_offers=[], next_page_token="def",
            ),
            service.ListPurchasableOffersResponse(
                purchasable_offers=[service.PurchasableOffer()],
                next_page_token="ghi",
            ),
            service.ListPurchasableOffersResponse(
                purchasable_offers=[
                    service.PurchasableOffer(),
                    service.PurchasableOffer(),
                ],
            ),
            RuntimeError,
        )

        # The pager must carry the routing metadata for the empty request.
        expected_metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("customer", ""),)),
        )
        pager = client.list_purchasable_offers(request={})
        assert pager._metadata == expected_metadata

        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, service.PurchasableOffer) for i in results)
def test_list_purchasable_offers_pages(transport_name: str = "grpc"):
    """The sync pager exposes raw pages carrying the expected page tokens."""
    client = CloudChannelServiceClient(
        # Fix: pass a credentials *instance* — the class object was being
        # passed (missing parentheses), inconsistent with the rest of the suite.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_purchasable_offers), "__call__"
    ) as call:
        # Pages of 3, 0, 1 and 2 offers; the trailing RuntimeError guards
        # against the pager requesting a page beyond the last one.
        call.side_effect = (
            service.ListPurchasableOffersResponse(
                purchasable_offers=[
                    service.PurchasableOffer(),
                    service.PurchasableOffer(),
                    service.PurchasableOffer(),
                ],
                next_page_token="abc",
            ),
            service.ListPurchasableOffersResponse(
                purchasable_offers=[], next_page_token="def",
            ),
            service.ListPurchasableOffersResponse(
                purchasable_offers=[service.PurchasableOffer()],
                next_page_token="ghi",
            ),
            service.ListPurchasableOffersResponse(
                purchasable_offers=[
                    service.PurchasableOffer(),
                    service.PurchasableOffer(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_purchasable_offers(request={}).pages)
        # The final page has no token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_purchasable_offers_async_pager():
    """Async iteration over the pager yields every offer across all pages."""
    client = CloudChannelServiceAsyncClient(
        # Fix: pass a credentials *instance* — the class object was being
        # passed (missing parentheses), inconsistent with the rest of the suite.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_purchasable_offers),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Pages of 3, 0, 1 and 2 offers; the trailing RuntimeError guards
        # against the pager requesting a page beyond the last one.
        call.side_effect = (
            service.ListPurchasableOffersResponse(
                purchasable_offers=[
                    service.PurchasableOffer(),
                    service.PurchasableOffer(),
                    service.PurchasableOffer(),
                ],
                next_page_token="abc",
            ),
            service.ListPurchasableOffersResponse(
                purchasable_offers=[], next_page_token="def",
            ),
            service.ListPurchasableOffersResponse(
                purchasable_offers=[service.PurchasableOffer()],
                next_page_token="ghi",
            ),
            service.ListPurchasableOffersResponse(
                purchasable_offers=[
                    service.PurchasableOffer(),
                    service.PurchasableOffer(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_purchasable_offers(request={})
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, service.PurchasableOffer) for i in responses)
@pytest.mark.asyncio
async def test_list_purchasable_offers_async_pages():
    """The async pager exposes raw pages carrying the expected page tokens."""
    client = CloudChannelServiceAsyncClient(
        # Fix: pass a credentials *instance* — the class object was being
        # passed (missing parentheses), inconsistent with the rest of the suite.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_purchasable_offers),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Pages of 3, 0, 1 and 2 offers; the trailing RuntimeError guards
        # against the pager requesting a page beyond the last one.
        call.side_effect = (
            service.ListPurchasableOffersResponse(
                purchasable_offers=[
                    service.PurchasableOffer(),
                    service.PurchasableOffer(),
                    service.PurchasableOffer(),
                ],
                next_page_token="abc",
            ),
            service.ListPurchasableOffersResponse(
                purchasable_offers=[], next_page_token="def",
            ),
            service.ListPurchasableOffersResponse(
                purchasable_offers=[service.PurchasableOffer()],
                next_page_token="ghi",
            ),
            service.ListPurchasableOffersResponse(
                purchasable_offers=[
                    service.PurchasableOffer(),
                    service.PurchasableOffer(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_purchasable_offers(request={})).pages:
            pages.append(page_)
        # The final page has no token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [service.RegisterSubscriberRequest, dict])
def test_register_subscriber(request_type, transport: str = "grpc"):
    """register_subscriber returns the plain response proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    req = request_type()  # proto3: all fields optional; the API is mocked

    with mock.patch.object(
        type(client.transport.register_subscriber), "__call__"
    ) as call:
        call.return_value = service.RegisterSubscriberResponse(topic="topic_value")
        resp = client.register_subscriber(req)

    # Exactly one stub invocation with the proto request.
    assert len(call.mock_calls) == 1
    assert call.mock_calls[0].args[0] == service.RegisterSubscriberRequest()

    assert isinstance(resp, service.RegisterSubscriberResponse)
    assert resp.topic == "topic_value"
def test_register_subscriber_empty_call():
    """Coverage failsafe: a call with request=None and no flattened fields
    must still reach the stub with a default request."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )

    with mock.patch.object(
        type(client.transport.register_subscriber), "__call__"
    ) as call:
        client.register_subscriber()
        call.assert_called()
        # The client synthesizes an empty default request.
        assert call.mock_calls[0].args[0] == service.RegisterSubscriberRequest()
@pytest.mark.asyncio
async def test_register_subscriber_async(
    transport: str = "grpc_asyncio", request_type=service.RegisterSubscriberRequest
):
    """Async variant: register_subscriber resolves to the response proto."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    req = request_type()  # proto3: all fields optional; the API is mocked

    with mock.patch.object(
        type(client.transport.register_subscriber), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.RegisterSubscriberResponse(topic="topic_value")
        )
        resp = await client.register_subscriber(req)

    # Stub invoked with the proto request.
    assert len(call.mock_calls)
    assert call.mock_calls[0].args[0] == service.RegisterSubscriberRequest()

    assert isinstance(resp, service.RegisterSubscriberResponse)
    assert resp.topic == "topic_value"
@pytest.mark.asyncio
async def test_register_subscriber_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_register_subscriber_async(request_type=dict)
def test_register_subscriber_field_headers():
    """URI-bound request fields must be forwarded as routing metadata."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the routed field with a non-empty value.
    req = service.RegisterSubscriberRequest()
    req.account = "account/value"

    with mock.patch.object(
        type(client.transport.register_subscriber), "__call__"
    ) as call:
        call.return_value = service.RegisterSubscriberResponse()
        client.register_subscriber(req)

    # Exactly one stub invocation, with the request passed through.
    assert len(call.mock_calls) == 1
    assert call.mock_calls[0].args[0] == req

    # The x-goog-request-params header carries the routed field.
    kw = call.mock_calls[0].kwargs
    assert ("x-goog-request-params", "account=account/value") in kw["metadata"]
@pytest.mark.asyncio
async def test_register_subscriber_field_headers_async():
    """Async variant: URI-bound fields are forwarded as routing metadata."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the routed field with a non-empty value.
    req = service.RegisterSubscriberRequest()
    req.account = "account/value"

    with mock.patch.object(
        type(client.transport.register_subscriber), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.RegisterSubscriberResponse()
        )
        await client.register_subscriber(req)

    # Stub invoked with the request passed through.
    assert len(call.mock_calls)
    assert call.mock_calls[0].args[0] == req

    # The x-goog-request-params header carries the routed field.
    kw = call.mock_calls[0].kwargs
    assert ("x-goog-request-params", "account=account/value") in kw["metadata"]
@pytest.mark.parametrize("request_type", [service.UnregisterSubscriberRequest, dict])
def test_unregister_subscriber(request_type, transport: str = "grpc"):
    """unregister_subscriber returns the plain response proto."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    req = request_type()  # proto3: all fields optional; the API is mocked

    with mock.patch.object(
        type(client.transport.unregister_subscriber), "__call__"
    ) as call:
        call.return_value = service.UnregisterSubscriberResponse(topic="topic_value")
        resp = client.unregister_subscriber(req)

    # Exactly one stub invocation with the proto request.
    assert len(call.mock_calls) == 1
    assert call.mock_calls[0].args[0] == service.UnregisterSubscriberRequest()

    assert isinstance(resp, service.UnregisterSubscriberResponse)
    assert resp.topic == "topic_value"
def test_unregister_subscriber_empty_call():
    """Coverage failsafe: a call with request=None and no flattened fields
    must still reach the stub with a default request."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )

    with mock.patch.object(
        type(client.transport.unregister_subscriber), "__call__"
    ) as call:
        client.unregister_subscriber()
        call.assert_called()
        # The client synthesizes an empty default request.
        assert call.mock_calls[0].args[0] == service.UnregisterSubscriberRequest()
@pytest.mark.asyncio
async def test_unregister_subscriber_async(
    transport: str = "grpc_asyncio", request_type=service.UnregisterSubscriberRequest
):
    """Async variant: unregister_subscriber resolves to the response proto."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    req = request_type()  # proto3: all fields optional; the API is mocked

    with mock.patch.object(
        type(client.transport.unregister_subscriber), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.UnregisterSubscriberResponse(topic="topic_value")
        )
        resp = await client.unregister_subscriber(req)

    # Stub invoked with the proto request.
    assert len(call.mock_calls)
    assert call.mock_calls[0].args[0] == service.UnregisterSubscriberRequest()

    assert isinstance(resp, service.UnregisterSubscriberResponse)
    assert resp.topic == "topic_value"
@pytest.mark.asyncio
async def test_unregister_subscriber_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_unregister_subscriber_async(request_type=dict)
def test_unregister_subscriber_field_headers():
    """URI-bound request fields must be forwarded as routing metadata."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the routed field with a non-empty value.
    req = service.UnregisterSubscriberRequest()
    req.account = "account/value"

    with mock.patch.object(
        type(client.transport.unregister_subscriber), "__call__"
    ) as call:
        call.return_value = service.UnregisterSubscriberResponse()
        client.unregister_subscriber(req)

    # Exactly one stub invocation, with the request passed through.
    assert len(call.mock_calls) == 1
    assert call.mock_calls[0].args[0] == req

    # The x-goog-request-params header carries the routed field.
    kw = call.mock_calls[0].kwargs
    assert ("x-goog-request-params", "account=account/value") in kw["metadata"]
@pytest.mark.asyncio
async def test_unregister_subscriber_field_headers_async():
    """Async variant: URI-bound fields are forwarded as routing metadata."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the routed field with a non-empty value.
    req = service.UnregisterSubscriberRequest()
    req.account = "account/value"

    with mock.patch.object(
        type(client.transport.unregister_subscriber), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.UnregisterSubscriberResponse()
        )
        await client.unregister_subscriber(req)

    # Stub invoked with the request passed through.
    assert len(call.mock_calls)
    assert call.mock_calls[0].args[0] == req

    # The x-goog-request-params header carries the routed field.
    kw = call.mock_calls[0].kwargs
    assert ("x-goog-request-params", "account=account/value") in kw["metadata"]
@pytest.mark.parametrize("request_type", [service.ListSubscribersRequest, dict])
def test_list_subscribers(request_type, transport: str = "grpc"):
    """list_subscribers returns a pager exposing the response fields."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    req = request_type()  # proto3: all fields optional; the API is mocked

    with mock.patch.object(type(client.transport.list_subscribers), "__call__") as call:
        call.return_value = service.ListSubscribersResponse(
            topic="topic_value",
            service_accounts=["service_accounts_value"],
            next_page_token="next_page_token_value",
        )
        resp = client.list_subscribers(req)

    # Exactly one stub invocation with the proto request.
    assert len(call.mock_calls) == 1
    assert call.mock_calls[0].args[0] == service.ListSubscribersRequest()

    # The pager surfaces the response fields.
    assert isinstance(resp, pagers.ListSubscribersPager)
    assert resp.topic == "topic_value"
    assert resp.service_accounts == ["service_accounts_value"]
    assert resp.next_page_token == "next_page_token_value"
def test_list_subscribers_empty_call():
    """Coverage failsafe: a call with request=None and no flattened fields
    must still reach the stub with a default request."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )

    with mock.patch.object(type(client.transport.list_subscribers), "__call__") as call:
        client.list_subscribers()
        call.assert_called()
        # The client synthesizes an empty default request.
        assert call.mock_calls[0].args[0] == service.ListSubscribersRequest()
@pytest.mark.asyncio
async def test_list_subscribers_async(
    transport: str = "grpc_asyncio", request_type=service.ListSubscribersRequest
):
    """Async variant: the call resolves to an async pager."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )

    req = request_type()  # proto3: all fields optional; the API is mocked

    with mock.patch.object(type(client.transport.list_subscribers), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListSubscribersResponse(
                topic="topic_value",
                service_accounts=["service_accounts_value"],
                next_page_token="next_page_token_value",
            )
        )
        resp = await client.list_subscribers(req)

    # Stub invoked with the proto request.
    assert len(call.mock_calls)
    assert call.mock_calls[0].args[0] == service.ListSubscribersRequest()

    # The async pager surfaces the response fields.
    assert isinstance(resp, pagers.ListSubscribersAsyncPager)
    assert resp.topic == "topic_value"
    assert resp.service_accounts == ["service_accounts_value"]
    assert resp.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_subscribers_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_list_subscribers_async(request_type=dict)
def test_list_subscribers_field_headers():
    """URI-bound request fields must be forwarded as routing metadata."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the routed field with a non-empty value.
    req = service.ListSubscribersRequest()
    req.account = "account/value"

    with mock.patch.object(type(client.transport.list_subscribers), "__call__") as call:
        call.return_value = service.ListSubscribersResponse()
        client.list_subscribers(req)

    # Exactly one stub invocation, with the request passed through.
    assert len(call.mock_calls) == 1
    assert call.mock_calls[0].args[0] == req

    # The x-goog-request-params header carries the routed field.
    kw = call.mock_calls[0].kwargs
    assert ("x-goog-request-params", "account=account/value") in kw["metadata"]
@pytest.mark.asyncio
async def test_list_subscribers_field_headers_async():
    """Async variant: URI-bound fields are forwarded as routing metadata."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the routed field with a non-empty value.
    req = service.ListSubscribersRequest()
    req.account = "account/value"

    with mock.patch.object(type(client.transport.list_subscribers), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListSubscribersResponse()
        )
        await client.list_subscribers(req)

    # Stub invoked with the request passed through.
    assert len(call.mock_calls)
    assert call.mock_calls[0].args[0] == req

    # The x-goog-request-params header carries the routed field.
    kw = call.mock_calls[0].kwargs
    assert ("x-goog-request-params", "account=account/value") in kw["metadata"]
def test_list_subscribers_pager(transport_name: str = "grpc"):
    """Iterating the sync pager yields every subscriber account across pages."""
    client = CloudChannelServiceClient(
        # Fix: pass a credentials *instance* — the class object was being
        # passed (missing parentheses), inconsistent with the rest of the suite.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_subscribers), "__call__") as call:
        # Pages of 3, 0, 1 and 2 accounts; the trailing RuntimeError guards
        # against the pager requesting a page beyond the last one.
        call.side_effect = (
            service.ListSubscribersResponse(
                service_accounts=[str(), str(), str()], next_page_token="abc",
            ),
            service.ListSubscribersResponse(
                service_accounts=[], next_page_token="def",
            ),
            service.ListSubscribersResponse(
                service_accounts=[str()], next_page_token="ghi",
            ),
            service.ListSubscribersResponse(service_accounts=[str(), str()]),
            RuntimeError,
        )

        # The pager must carry the routing metadata for the empty request.
        expected_metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("account", ""),)),
        )
        pager = client.list_subscribers(request={})
        assert pager._metadata == expected_metadata

        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, str) for i in results)
def test_list_subscribers_pages(transport_name: str = "grpc"):
    """The sync pager exposes raw pages carrying the expected page tokens."""
    client = CloudChannelServiceClient(
        # Fix: pass a credentials *instance* — the class object was being
        # passed (missing parentheses), inconsistent with the rest of the suite.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_subscribers), "__call__") as call:
        # Pages of 3, 0, 1 and 2 accounts; the trailing RuntimeError guards
        # against the pager requesting a page beyond the last one.
        call.side_effect = (
            service.ListSubscribersResponse(
                service_accounts=[str(), str(), str()], next_page_token="abc",
            ),
            service.ListSubscribersResponse(
                service_accounts=[], next_page_token="def",
            ),
            service.ListSubscribersResponse(
                service_accounts=[str()], next_page_token="ghi",
            ),
            service.ListSubscribersResponse(service_accounts=[str(), str()]),
            RuntimeError,
        )
        pages = list(client.list_subscribers(request={}).pages)
        # The final page has no token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_subscribers_async_pager():
    """Async iteration over the pager yields every account across all pages."""
    client = CloudChannelServiceAsyncClient(
        # Fix: pass a credentials *instance* — the class object was being
        # passed (missing parentheses), inconsistent with the rest of the suite.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_subscribers), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Pages of 3, 0, 1 and 2 accounts; the trailing RuntimeError guards
        # against the pager requesting a page beyond the last one.
        call.side_effect = (
            service.ListSubscribersResponse(
                service_accounts=[str(), str(), str()], next_page_token="abc",
            ),
            service.ListSubscribersResponse(
                service_accounts=[], next_page_token="def",
            ),
            service.ListSubscribersResponse(
                service_accounts=[str()], next_page_token="ghi",
            ),
            service.ListSubscribersResponse(service_accounts=[str(), str()]),
            RuntimeError,
        )
        async_pager = await client.list_subscribers(request={})
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, str) for i in responses)
@pytest.mark.asyncio
async def test_list_subscribers_async_pages():
    """The async pager exposes raw pages carrying the expected page tokens."""
    client = CloudChannelServiceAsyncClient(
        # Fix: pass a credentials *instance* — the class object was being
        # passed (missing parentheses), inconsistent with the rest of the suite.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_subscribers), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Pages of 3, 0, 1 and 2 accounts; the trailing RuntimeError guards
        # against the pager requesting a page beyond the last one.
        call.side_effect = (
            service.ListSubscribersResponse(
                service_accounts=[str(), str(), str()], next_page_token="abc",
            ),
            service.ListSubscribersResponse(
                service_accounts=[], next_page_token="def",
            ),
            service.ListSubscribersResponse(
                service_accounts=[str()], next_page_token="ghi",
            ),
            service.ListSubscribersResponse(service_accounts=[str(), str()]),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_subscribers(request={})).pages:
            pages.append(page_)
        # The final page has no token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
    """Mutually exclusive client options must each raise ValueError."""
    # credentials + transport instance
    transport = transports.CloudChannelServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        CloudChannelServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )

    # credentials_file + transport instance
    transport = transports.CloudChannelServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        CloudChannelServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )

    # api_key + transport instance
    transport = transports.CloudChannelServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        CloudChannelServiceClient(client_options=options, transport=transport)

    # api_key + credentials
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        CloudChannelServiceClient(
            client_options=options,
            credentials=ga_credentials.AnonymousCredentials(),
        )

    # scopes + transport instance
    transport = transports.CloudChannelServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        CloudChannelServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=transport
        )
def test_transport_instance():
    """A client instantiated with a custom transport adopts it verbatim."""
    custom = transports.CloudChannelServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert CloudChannelServiceClient(transport=custom).transport is custom
def test_transport_get_channel():
    """Both gRPC transport flavors expose a usable channel."""
    for transport_cls in (
        transports.CloudChannelServiceGrpcTransport,
        transports.CloudChannelServiceGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CloudChannelServiceGrpcTransport,
        transports.CloudChannelServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to application-default credentials when none given."""
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """With no transport argument, the sync client defaults to gRPC."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(client.transport, transports.CloudChannelServiceGrpcTransport)
def test_cloud_channel_service_base_transport_error():
    """Supplying both a credentials object and a credentials file is rejected."""
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.CloudChannelServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_cloud_channel_service_base_transport():
    """Every RPC on the abstract base transport must raise NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.channel_v1.services.cloud_channel_service.transports.CloudChannelServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.CloudChannelServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "list_customers",
        "get_customer",
        "check_cloud_identity_accounts_exist",
        "create_customer",
        "update_customer",
        "delete_customer",
        "import_customer",
        "provision_cloud_identity",
        "list_entitlements",
        "list_transferable_skus",
        "list_transferable_offers",
        "get_entitlement",
        "create_entitlement",
        "change_parameters",
        "change_renewal_settings",
        "change_offer",
        "start_paid_service",
        "suspend_entitlement",
        "cancel_entitlement",
        "activate_entitlement",
        "transfer_entitlements",
        "transfer_entitlements_to_google",
        "list_channel_partner_links",
        "get_channel_partner_link",
        "create_channel_partner_link",
        "update_channel_partner_link",
        "lookup_offer",
        "list_products",
        "list_skus",
        "list_offers",
        "list_purchasable_skus",
        "list_purchasable_offers",
        "register_subscriber",
        "unregister_subscriber",
        "list_subscribers",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_cloud_channel_service_base_transport_with_credentials_file():
    """A credentials file is loaded with the service's default scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.channel_v1.services.cloud_channel_service.transports.CloudChannelServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.CloudChannelServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/apps.order",),
            quota_project_id="octopus",
        )
def test_cloud_channel_service_base_transport_with_adc():
    """ADC is consulted when neither credentials nor a credentials file are given."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.channel_v1.services.cloud_channel_service.transports.CloudChannelServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.CloudChannelServiceTransport()
        adc.assert_called_once()
def test_cloud_channel_service_auth_adc():
    """Client construction without credentials asks ADC with the default scopes."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        CloudChannelServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/apps.order",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CloudChannelServiceGrpcTransport,
        transports.CloudChannelServiceGrpcAsyncIOTransport,
    ],
)
def test_cloud_channel_service_transport_auth_adc(transport_class):
    """Transports pass user scopes and quota project through to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/apps.order",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.CloudChannelServiceGrpcTransport, grpc_helpers),
        (transports.CloudChannelServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_cloud_channel_service_transport_create_channel(transport_class, grpc_helpers):
    """The transport forwards the expected arguments to grpc create_channel."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # The channel must target the default endpoint with unlimited
        # message sizes and carry the caller's scopes/quota project.
        create_channel.assert_called_with(
            "cloudchannel.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/apps.order",),
            scopes=["1", "2"],
            default_host="cloudchannel.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CloudChannelServiceGrpcTransport,
        transports.CloudChannelServiceGrpcAsyncIOTransport,
    ],
)
def test_cloud_channel_service_grpc_transport_client_cert_source_for_mtls(
    transport_class,
):
    """mTLS: explicit ssl_channel_credentials wins; otherwise the cert callback is used."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_cloud_channel_service_host_no_port():
    """An endpoint without an explicit port gets the default :443 appended."""
    options = client_options.ClientOptions(
        api_endpoint="cloudchannel.googleapis.com"
    )
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
    )
    assert client.transport._host == "cloudchannel.googleapis.com:443"
def test_cloud_channel_service_host_with_port():
    """An endpoint that already carries a port keeps that port untouched."""
    options = client_options.ClientOptions(
        api_endpoint="cloudchannel.googleapis.com:8000"
    )
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
    )
    assert client.transport._host == "cloudchannel.googleapis.com:8000"
def test_cloud_channel_service_grpc_transport_channel():
    """A caller-supplied channel is adopted verbatim by the sync transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.CloudChannelServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: identity comparison with None uses `is`, not `==` (E711).
    assert transport._ssl_channel_credentials is None
def test_cloud_channel_service_grpc_asyncio_transport_channel():
    """A caller-supplied channel is adopted verbatim by the asyncio transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.CloudChannelServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: identity comparison with None uses `is`, not `==` (E711).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CloudChannelServiceGrpcTransport,
        transports.CloudChannelServiceGrpcAsyncIOTransport,
    ],
)
def test_cloud_channel_service_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint + client_cert_source still build an mTLS channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # Using the deprecated arguments must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CloudChannelServiceGrpcTransport,
        transports.CloudChannelServiceGrpcAsyncIOTransport,
    ],
)
def test_cloud_channel_service_transport_channel_mtls_with_adc(transport_class):
    """With api_mtls_endpoint but no cert source, ADC SslCredentials are used."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # Using the deprecated arguments must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_cloud_channel_service_grpc_lro_client():
    """The sync transport lazily builds and caches an LRO operations client."""
    client = CloudChannelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport
    ops_client = transport.operations_client
    # Ensure that we have a api-core operations client.
    assert isinstance(ops_client, operations_v1.OperationsClient)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is ops_client
def test_cloud_channel_service_grpc_lro_async_client():
    """The asyncio transport lazily builds and caches an async LRO operations client."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport
    ops_client = transport.operations_client
    # Ensure that we have a api-core operations client.
    assert isinstance(ops_client, operations_v1.OperationsAsyncClient)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is ops_client
def test_channel_partner_link_path():
    """channel_partner_link_path renders the documented resource-name template."""
    account = "squid"
    channel_partner_link = "clam"
    want = "accounts/{}/channelPartnerLinks/{}".format(account, channel_partner_link)
    got = CloudChannelServiceClient.channel_partner_link_path(
        account, channel_partner_link
    )
    assert want == got
def test_parse_channel_partner_link_path():
    """parse_channel_partner_link_path is the inverse of channel_partner_link_path."""
    want = {
        "account": "whelk",
        "channel_partner_link": "octopus",
    }
    path = CloudChannelServiceClient.channel_partner_link_path(**want)
    # Round-trip: parsing the rendered path recovers the original parts.
    assert CloudChannelServiceClient.parse_channel_partner_link_path(path) == want
def test_customer_path():
    """customer_path renders accounts/{account}/customers/{customer}."""
    account = "oyster"
    customer = "nudibranch"
    want = "accounts/{}/customers/{}".format(account, customer)
    got = CloudChannelServiceClient.customer_path(account, customer)
    assert want == got
def test_parse_customer_path():
    """parse_customer_path is the inverse of customer_path."""
    want = {
        "account": "cuttlefish",
        "customer": "mussel",
    }
    path = CloudChannelServiceClient.customer_path(**want)
    # Round-trip: parsing the rendered path recovers the original parts.
    assert CloudChannelServiceClient.parse_customer_path(path) == want
def test_entitlement_path():
    """entitlement_path renders the three-part entitlement resource name."""
    account = "winkle"
    customer = "nautilus"
    entitlement = "scallop"
    want = "accounts/{}/customers/{}/entitlements/{}".format(
        account, customer, entitlement
    )
    got = CloudChannelServiceClient.entitlement_path(account, customer, entitlement)
    assert want == got
def test_parse_entitlement_path():
    """parse_entitlement_path is the inverse of entitlement_path."""
    want = {
        "account": "abalone",
        "customer": "squid",
        "entitlement": "clam",
    }
    path = CloudChannelServiceClient.entitlement_path(**want)
    # Round-trip: parsing the rendered path recovers the original parts.
    assert CloudChannelServiceClient.parse_entitlement_path(path) == want
def test_offer_path():
    """offer_path renders accounts/{account}/offers/{offer}."""
    account = "whelk"
    offer = "octopus"
    want = "accounts/{}/offers/{}".format(account, offer)
    got = CloudChannelServiceClient.offer_path(account, offer)
    assert want == got
def test_parse_offer_path():
    """parse_offer_path is the inverse of offer_path."""
    want = {
        "account": "oyster",
        "offer": "nudibranch",
    }
    path = CloudChannelServiceClient.offer_path(**want)
    # Round-trip: parsing the rendered path recovers the original parts.
    assert CloudChannelServiceClient.parse_offer_path(path) == want
def test_product_path():
    """product_path renders products/{product}."""
    product = "cuttlefish"
    want = "products/{}".format(product)
    got = CloudChannelServiceClient.product_path(product)
    assert want == got
def test_parse_product_path():
    """parse_product_path is the inverse of product_path."""
    want = {
        "product": "mussel",
    }
    path = CloudChannelServiceClient.product_path(**want)
    # Round-trip: parsing the rendered path recovers the original parts.
    assert CloudChannelServiceClient.parse_product_path(path) == want
def test_sku_path():
    """sku_path renders products/{product}/skus/{sku}."""
    product = "winkle"
    sku = "nautilus"
    want = "products/{}/skus/{}".format(product, sku)
    got = CloudChannelServiceClient.sku_path(product, sku)
    assert want == got
def test_parse_sku_path():
    """parse_sku_path is the inverse of sku_path."""
    want = {
        "product": "scallop",
        "sku": "abalone",
    }
    path = CloudChannelServiceClient.sku_path(**want)
    # Round-trip: parsing the rendered path recovers the original parts.
    assert CloudChannelServiceClient.parse_sku_path(path) == want
def test_common_billing_account_path():
    """common_billing_account_path renders billingAccounts/{billing_account}."""
    billing_account = "squid"
    want = "billingAccounts/{}".format(billing_account)
    got = CloudChannelServiceClient.common_billing_account_path(billing_account)
    assert want == got
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path is the inverse of common_billing_account_path."""
    want = {
        "billing_account": "clam",
    }
    path = CloudChannelServiceClient.common_billing_account_path(**want)
    # Round-trip: parsing the rendered path recovers the original parts.
    assert CloudChannelServiceClient.parse_common_billing_account_path(path) == want
def test_common_folder_path():
    """common_folder_path renders folders/{folder}."""
    folder = "whelk"
    want = "folders/{}".format(folder)
    got = CloudChannelServiceClient.common_folder_path(folder)
    assert want == got
def test_parse_common_folder_path():
    """parse_common_folder_path is the inverse of common_folder_path."""
    want = {
        "folder": "octopus",
    }
    path = CloudChannelServiceClient.common_folder_path(**want)
    # Round-trip: parsing the rendered path recovers the original parts.
    assert CloudChannelServiceClient.parse_common_folder_path(path) == want
def test_common_organization_path():
    """common_organization_path renders organizations/{organization}."""
    organization = "oyster"
    want = "organizations/{}".format(organization)
    got = CloudChannelServiceClient.common_organization_path(organization)
    assert want == got
def test_parse_common_organization_path():
    """parse_common_organization_path is the inverse of common_organization_path."""
    want = {
        "organization": "nudibranch",
    }
    path = CloudChannelServiceClient.common_organization_path(**want)
    # Round-trip: parsing the rendered path recovers the original parts.
    assert CloudChannelServiceClient.parse_common_organization_path(path) == want
def test_common_project_path():
    """common_project_path renders projects/{project}."""
    project = "cuttlefish"
    want = "projects/{}".format(project)
    got = CloudChannelServiceClient.common_project_path(project)
    assert want == got
def test_parse_common_project_path():
    """parse_common_project_path is the inverse of common_project_path."""
    want = {
        "project": "mussel",
    }
    path = CloudChannelServiceClient.common_project_path(**want)
    # Round-trip: parsing the rendered path recovers the original parts.
    assert CloudChannelServiceClient.parse_common_project_path(path) == want
def test_common_location_path():
    """common_location_path renders projects/{project}/locations/{location}."""
    project = "winkle"
    location = "nautilus"
    want = "projects/{}/locations/{}".format(project, location)
    got = CloudChannelServiceClient.common_location_path(project, location)
    assert want == got
def test_parse_common_location_path():
    """parse_common_location_path is the inverse of common_location_path."""
    want = {
        "project": "scallop",
        "location": "abalone",
    }
    path = CloudChannelServiceClient.common_location_path(**want)
    # Round-trip: parsing the rendered path recovers the original parts.
    assert CloudChannelServiceClient.parse_common_location_path(path) == want
def test_client_with_default_client_info():
    """client_info is forwarded to the transport's _prep_wrapped_messages hook."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Construction through the client wrapper...
    with mock.patch.object(
        transports.CloudChannelServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        client = CloudChannelServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # ...and direct construction of the transport class behave the same.
    with mock.patch.object(
        transports.CloudChannelServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = CloudChannelServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Leaving the async client's context manager closes its gRPC channel."""
    client = CloudChannelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            # close() must only fire on exit, not on enter.
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Leaving the sync client's context manager closes the underlying channel."""
    # Maps transport name -> attribute that holds the channel to be closed.
    # NOTE: this local dict shadows the module-level `transports` import.
    transports = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transports.items():
        client = CloudChannelServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                # close() must only fire on exit, not on enter.
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client as a context manager delegates close() to its transport."""
    transports = [
        "grpc",
    ]
    for transport in transports:
        client = CloudChannelServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (CloudChannelServiceClient, transports.CloudChannelServiceGrpcTransport),
        (
            CloudChannelServiceAsyncClient,
            transports.CloudChannelServiceGrpcAsyncIOTransport,
        ),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key client option is exchanged for API-key credentials."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            # The transport must be constructed with the exchanged credentials
            # and otherwise default settings.
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| googleapis/python-channel | tests/unit/gapic/channel_v1/test_cloud_channel_service.py | Python | apache-2.0 | 319,406 | [
"Octopus"
] | ff0fa04d14db8bd009aa636a22caf6744dd7f560038d66b9a496acd0b997b780 |
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
from rdkit.Chem import Descriptors
from rdkit.Chem.MolKey import InchiInfo
# Map of descriptor name -> callable(mol) evaluated by rdkit_descriptors().
# Mixes Descriptors.* convenience wrappers with rdMolDescriptors.Calc* functions.
# NOTE(review): the key 'NumSpiroAtom' is singular while the RDKit function is
# CalcNumSpiroAtoms — presumably intentional for downstream consumers; confirm
# before renaming, since the key appears in every returned descriptor dict.
INTERESTING_DESCRIPTORS = dict(
    ExactMolWt=Descriptors.ExactMolWt,
    FractionCSP3=Descriptors.FractionCSP3,
    HeavyAtomCount=Descriptors.HeavyAtomCount,
    LabuteASA=Descriptors.LabuteASA,
    MolLogP=Descriptors.MolLogP,
    MolWt=Descriptors.MolWt,
    NHOHCount=Descriptors.NHOHCount,
    NOCount=Descriptors.NOCount,
    NumAliphaticCarbocycles=Descriptors.NumAliphaticCarbocycles,
    NumAliphaticHeterocycles=Descriptors.NumAliphaticHeterocycles,
    NumAliphaticRings=Descriptors.NumAliphaticRings,
    NumAromaticCarbocycles=Descriptors.NumAromaticCarbocycles,
    NumAromaticHeterocycles=Descriptors.NumAromaticHeterocycles,
    NumAromaticRings=Descriptors.NumAromaticRings,
    NumHAcceptors=Descriptors.NumHAcceptors,
    NumHDonors=Descriptors.NumHDonors,
    NumRotatableBonds=Descriptors.NumRotatableBonds,
    NumSaturatedCarbocycles=Descriptors.NumSaturatedCarbocycles,
    NumSaturatedHeterocycles=Descriptors.NumSaturatedHeterocycles,
    NumSaturatedRings=Descriptors.NumSaturatedRings,
    RingCount=Descriptors.RingCount,
    TPSA=Descriptors.TPSA,
    NumAmideBonds=rdMolDescriptors.CalcNumAmideBonds,
    NumBridgeheadAtoms=rdMolDescriptors.CalcNumBridgeheadAtoms,
    NumSpiroAtom=rdMolDescriptors.CalcNumSpiroAtoms
)
def rdkit_descriptors(mol):
    """
    Given a molecule, return a dict of lots of interesting descriptors.

    :param mol: the molecule to process (rdkit.Chem.Mol), or None
    :return: dict mapping descriptor names to values; empty for None input.
        If InChI-based stereo analysis fails after the descriptors were
        computed, the partially filled dict (without the chiral-center
        keys) is returned, matching the original best-effort behaviour.
    """
    dd = {}
    if mol is not None:
        try:
            dd = {k: fn(mol) for k, fn in INTERESTING_DESCRIPTORS.items()}
            # /SUU makes InChI enumerate undefined stereo centres so the
            # defined/undefined split below is meaningful.
            inchi = Chem.MolToInchi(mol, options='/SUU')
            inchi_info = InchiInfo.InchiInfo(inchi).get_sp3_stereo()
            (n_stereo, n_undef_stereo, is_meso, _dummy) = inchi_info['main']['non-isotopic']
            dd['NumChiralCenters'] = n_stereo
            dd['NumDefinedChiralCenters'] = n_stereo - n_undef_stereo
            dd['NumUndefinedChiralCenters'] = n_undef_stereo
            dd['IsMesoStructure'] = is_meso
        except ValueError:  # idiomatic form of the original `except (ValueError):`
            # Descriptor or InChI generation can fail on exotic molecules;
            # deliberately best-effort, so swallow and return what we have.
            pass
    return dd
def rdkit_standardize(mol):
    """
    Generate a canonical representation of a molecule.
    rdkit_standardize( (rdkit.Chem.Mol)mol ) -> rdkit.Chem.Mol
    On error, returns None
    """
    if mol is None:
        return None
    try:
        # Canonicalisation here means stripping explicit hydrogens.
        return Chem.RemoveHs(mol)
    except ValueError:
        return None
def rdkit_smiles(mol):
    """Return the canonical isomeric SMILES for *mol*, or None on failure."""
    if mol is None:
        return None
    try:
        return Chem.MolToSmiles(mol, isomericSmiles=True)
    except (RuntimeError, ValueError):
        return None
def rdkit_mol_from_smiles(smiles):
    """Parse a SMILES string into an rdkit.Chem.Mol; None for None/unparseable input."""
    if smiles is None:
        return None
    try:
        return Chem.MolFromSmiles(smiles)
    except ValueError:
        return None
| nbateshaus/chem-search | mol-loader/rdkit_utils.py | Python | bsd-3-clause | 2,930 | [
"RDKit"
] | a80beb7c854a8ed17666a551c94994de65938654c8b23702107e489a9916dc70 |
import math
import moose
from PyQt4 import Qt, QtGui, QtCore
from PyQt4.QtCore import QTimer
from PyQt4.QtCore import QObject
from PyQt4.QtCore import pyqtSignal
class Runner(QObject):
    """Helper class to control simulation execution
    See: http://doc.qt.digia.com/qq/qq27-responsive-guis.html :
    'Solving a Problem Step by Step' for design details.
    """
    # Signals emitted around the simulation life cycle.  The float payload is
    # the simulation time (current or target) in the units used by the MOOSE
    # clock, where applicable.
    simulationFinished = pyqtSignal(float)
    simulationStarted = pyqtSignal(float)
    simulationProgressed = pyqtSignal(float)
    simulationReset = pyqtSignal()
    simulationContinued = pyqtSignal(float)
    def __init__(self):
        QtCore.QObject.__init__(self)
        self.runTime = None
        self.updateInterval = None
        self.simulationInterval = None
        self.runSequence = None
        self.pause = None
        # Handle to the global MOOSE clock; used for current-time reporting.
        self.clock = moose.element("/clock")
    def resetSimulation(self, runTime, updateInterval, simulationInterval):
        """Store run parameters, reinitialise MOOSE and emit simulationReset."""
        self.runTime = runTime
        self.updateInterval = updateInterval
        self.simulationInterval = simulationInterval
        self.pause = False
        moose.reinit()
        self.simulationReset.emit()
    def computeRunSequence(self, runTime, updateInterval, simulationInterval):
        """Split runTime into a list of smaller chunks that sum to runTime.

        The naive updateInterval-based chunking was replaced by a fixed
        6 * runTime/20 + 7 * runTime/10 schedule to work around
        http://sourceforge.net/p/moose/bugs/147/ ; updateInterval and
        simulationInterval are currently unused but kept for interface
        compatibility.
        """
        runSequence = [runTime / 20.0] * 6
        runSequence.extend([runTime / 10.0] * 7)
        return runSequence
    def runSimulation(self, runTime):
        """Start a run of runTime, executed in steps via the Qt event loop."""
        self.runTime = runTime
        self.runSequence = self.computeRunSequence(self.runTime,
                                                   self.updateInterval,
                                                   self.simulationInterval)
        self.pause = False
        self.simulationStarted.emit(self.clock.currentTime + self.runTime)
        # Schedule the first step on the event loop so the GUI stays responsive.
        QTimer.singleShot(0, self.next)
    def next(self):
        """Execute one chunk of the run sequence and reschedule itself."""
        if self.pause:
            return
        if len(self.runSequence) == 0:
            self.simulationFinished.emit(self.clock.currentTime)
            return
        moose.start(self.runSequence.pop(0))
        self.simulationProgressed.emit(self.clock.currentTime)
        QTimer.singleShot(0, self.next)
    def pauseSimulation(self):
        """Suspend stepping; next() becomes a no-op until unpaused."""
        self.pause = True
    def unpauseSimulation(self):
        """Resume stepping from where the run sequence left off."""
        self.pause = False
        self.next()
    def togglePauseSimulation(self):
        """Flip the paused state, resuming stepping when unpausing."""
        if self.pause:
            self.pause = False
            self.next()
        else:
            self.pause = True
    def resetAndRunSimulation(self
                              , runTime
                              , updateInterval
                              , simulationInterval
                              ):
        """Reset with the given parameters, then start running.

        Bug fix: runSimulation() requires a runTime argument; the original
        called it with none, which raised TypeError at runtime.
        """
        self.resetSimulation(runTime, updateInterval, simulationInterval)
        self.runSimulation(runTime)
| dilawar/moose-full | moose-gui/plugins/Runner.py | Python | gpl-2.0 | 3,353 | [
"MOOSE"
] | d76004e3e2ef47c0658ad83c32f93cf9ccc82c95cf1eac9aefd95c36ac62a804 |
#! /usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-bdii-info
# Author : Aresh Vedaee
########################################################################
"""
Check info on BDII for a given CE or site
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
def registerSwitches():
  '''
  Registers all switches that can be used while calling the script from the
  command line interface.
  '''
  # -H/--host selects an alternative BDII host; -V/--vo overrides the VO
  # that parseSwitches() would otherwise derive from the active proxy.
  Script.registerSwitch( "H:", "host=", "BDII host" )
  Script.registerSwitch( "V:", "vo=", "vo" )
  Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                       'Usage:',
                                       ' %s [option|cfgfile] ... <info> <Site|CE>' % Script.scriptName,
                                       'Arguments:',
                                       ' Site: Name of the Site (i.e. CERN-PROD)',
                                       ' CE: Name of the CE (i.e. cccreamceli05.in2p3.fr)',
                                       ' info: Accepted values (ce|ce-state|ce-cluster|ce-vo|site|site-se)' ] ) )
def parseSwitches():
  '''
  Parses the arguments passed by the user.

  Returns a dict with keys 'ce', 'site', 'host', 'vo' and 'info'.
  Calls Script.showHelp() (which exits) on any validation failure.
  '''
  Script.parseCommandLine( ignoreErrors = True )
  args = Script.getPositionalArgs()
  # Exactly two positional arguments are required: <info> <Site|CE>.
  if not len( args ) == 2:
    Script.showHelp()
  params = {}
  params['ce'] = None
  params['site'] = None
  params['host'] = None
  params['vo'] = None
  params['info'] = args[0]
  # Derive the VO from the current proxy's group; bail out via showHelp()
  # if no usable proxy information is available.
  ret = getProxyInfo( disableVOMS = True )
  if ret['OK'] and 'group' in ret['Value']:
    params['vo'] = getVOForGroup( ret['Value']['group'] )
  else:
    Script.gLogger.error( 'Could not determine VO' )
    Script.showHelp()
  # The second positional argument is a CE name for ce* queries and a
  # site name for site* queries.
  if params['info'] in ['ce', 'ce-state', 'ce-cluster', 'ce-vo']:
    params['ce'] = args[1]
  elif params['info']in ['site', 'site-se']:
    params['site'] = args[1]
  else:
    Script.gLogger.error( 'Wrong argument value' )
    Script.showHelp()
  # Command-line switches override the proxy-derived VO / default host.
  for unprocSw in Script.getUnprocessedSwitches():
    if unprocSw[0] in ( "H", "host" ):
      params['host'] = unprocSw[1]
    if unprocSw[0] in ( "V", "vo" ):
      params['vo'] = unprocSw[1]
  return params
def getInfo( params ):
  '''
  Retrieve information from BDII for the requested CE or site.

  :param dict params: parsed command-line parameters; 'info' selects the
      query type, 'ce'/'site' the target, 'vo' and 'host' are optional.
  :return: the S_OK structure returned by DiracAdmin; exits with code 2
      on query failure or an unknown 'info' value.
  '''
  from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
  diracAdmin = DiracAdmin()
  info = params['info']
  # Mutually exclusive query types: use elif so exactly one call is made and
  # `result` can never be left unbound (the original chain of independent
  # `if`s raised NameError for an unrecognised value).
  if info == 'ce':
    result = diracAdmin.getBDIICE( params['ce'], host = params['host'] )
  elif info == 'ce-state':
    result = diracAdmin.getBDIICEState( params['ce'], useVO = params['vo'], host = params['host'] )
  elif info == 'ce-cluster':
    result = diracAdmin.getBDIICluster( params['ce'], host = params['host'] )
  elif info == 'ce-vo':
    result = diracAdmin.getBDIICEVOView( params['ce'], useVO = params['vo'], host = params['host'] )
  elif info == 'site':
    result = diracAdmin.getBDIISite( params['site'], host = params['host'] )
  elif info == 'site-se':
    result = diracAdmin.getBDIISE( params['site'], useVO = params['vo'], host = params['host'] )
  else:
    # parseSwitches() validates 'info', but guard against direct callers.
    Script.gLogger.error( 'Unknown info type: %s' % info )
    DIRAC.exit( 2 )
  if not result['OK']:
    print(result['Message'])
    DIRAC.exit( 2 )
  return result
def showInfo( result, info ):
  '''
  Display information returned by getInfo().

  :param dict result: S_OK structure whose 'Value' is a list of dicts of
      Glue attributes for each matched BDII record.
  :param str info: query type selecting which header line to print
      (ce|ce-state|ce-cluster|ce-vo|site|site-se); 'all' prints every header.
  '''
  elements = result['Value']
  for element in elements:
    if info == 'ce' or info == 'all':
      print("CE: %s \n{" % element.get('GlueSubClusterName', 'Unknown'))
    if info == 'ce-state' or info == 'all':
      print("CE: %s \n{" % element.get('GlueCEUniqueID', 'Unknown'))
    if info == 'ce-cluster' or info == 'all':
      print("Cluster: %s \n{" % element.get('GlueClusterName', 'Unknown'))
    if info == 'ce-vo' or info == 'all':
      print("CEVOView: %s \n{" % element.get('GlueChunkKey', 'Unknown'))
    if info == 'site' or info == 'all':
      print("Site: %s \n{" % element.get('GlueSiteName', 'Unknown'))
    if info == 'site-se' or info == 'all':
      print("SE: %s \n{" % element.get('GlueSEUniqueID', 'Unknown'))
    # Bug fix: dict.iteritems() is Python-2-only; .items() works on both and
    # matches the py3 transition this file already started (print_function).
    for item in element.items():
      print(" %s: %s" % item)
    print("}")
#...............................................................................
if __name__ == "__main__":
  #Script initialization
  # Register CLI switches, parse them (exits on bad input), run the BDII
  # query and print the result.
  registerSwitches()
  #registerUsageMessage()
  params = parseSwitches()
  result = getInfo( params )
  showInfo( result, params['info'] )
  DIRAC.exit( 0 )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
"DIRAC"
] | b93cfb0c3d1cce020dd6bc29ae0f255636346397832355b423a5fe7ceac0cade |
#!/usr/bin/env python
from __future__ import print_function, absolute_import
import os
import sys
import json
import argparse
import traceback
from fnmatch import fnmatch
from .util import (keynat, tee_outstream_to_file, print_script_header,
print_datetime, timing)
import mdtraj as md
from mdtraj.formats.registry import _FormatRegistry
EXTENSIONS = _FormatRegistry.loaders.keys()
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
try:
from scandir import walk as _walk
except ImportError:
from os import walk as _walk
import warnings
warnings.warn('Get `scandir` for faster performance: https://github.com/benhoyt/scandir')
__doc__ = '''Convert an MD dataset with chunked trajectories into a more standard format
This script will walk down the filesystem, starting in ``root``, looking
for directories which contain one or more files matching ``pattern`` using
shell-style "glob" formatting. In each of these directories, the matching
files will be sorted by filename (natural order), interpreted as chunks
of a single contiguous MD trajectory, and loaded.
[This script assumes that trajectory files in the same leaf directory
are chunks of a contiguous MD trajectory. If that's not the case for your
dataset, this is the WRONG script for you.]
The concatenated trajectory will be saved to disk inside the ``outdir``
directory, under a filename set by the ``outfmt`` format string.
A record of conversion will be saved inside the ``metadata`` JSON Lines file
[http://jsonlines.org/], which contains a newline-delimited collection of
JSON records, each of which is of the form
{"chunks": ["path/to/input-chunk"], "filename": "output-file"}
'''
def walk_project(root, pattern):
    """Yield a tuple of naturally-sorted chunk paths for every directory under
    *root* that contains at least one file matching the glob *pattern*."""
    for dirpath, _dirnames, filenames in _walk(root):
        matches = [os.path.join(dirpath, fn) for fn in filenames if fnmatch(fn, pattern)]
        matches.sort(key=keynat)
        if matches:
            yield tuple(matches)
def load_chunks(chunk_fns, top, stride, discard_first=True):
    """Load each chunk file with MDTraj and join them into one trajectory.

    When ``discard_first`` is True, the first frame of *every* chunk is
    dropped before joining (old-style Folding@Home chunks repeat it).
    """
    parts = []
    for chunk in chunk_fns:
        traj = md.load(chunk, stride=stride, top=top)
        parts.append(traj[1:] if discard_first else traj)
    return parts[0].join(parts[1:])
def parse_args():
    """Parse command-line arguments, create the output directory if needed,
    and validate that ``--outfmt`` is a format string taking a single int.

    Returns
    -------
    argparse.Namespace
        The parsed arguments.
    """
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('root', help='Root of the directory structure containing '
                        'the MD trajectories to convert (filesystem path)')
    parser.add_argument('outdir', help='Output directory in which to save '
                        'converted trajectories. default="trajectories/"',
                        default='trajectories')
    parser.add_argument('--outfmt', help=('Output format. This should be a python '
                        'string format specified, which is parameterized by a '
                        'single int. The filename extension can specify any '
                        'supported MDTraj trajectory format ({}). '
                        'default="traj-%%08d.dcd"').format(', '.join(EXTENSIONS)),
                        default='traj-%08d.dcd')
    parser.add_argument('--pattern', help='Glob pattern for matching trajectory '
                        'chunks (example: \'frame*.xtc\'). Use single quotes '
                        'to specify expandable patterns', required=True)
    # BUG FIX: help text used to claim default="trajectories.jsonl", which
    # did not match the actual default below.
    parser.add_argument('--metadata', help='Path to metadata file. default="trajectories.json"',
                        default='trajectories.json')
    parser.add_argument('--discard-first', help='Flag to discard the initial frame '
                        'in each chunk before concatenating trajectories. This '
                        'is necessary for some old-style Folding@Home datasets',
                        action='store_true')
    parser.add_argument('--stride', type=int, help='Convert every stride-th '
                        'frame from the trajectories. default=1', default=1)
    parser.add_argument('--topology', help='Path to system topology file (.pdb / '
                        '.prmtop / .psf)', type=md.core.trajectory._parse_topology,
                        required=True)
    parser.add_argument('--dry-run', help='Trace the execution, without '
                        'actually running any actions', action='store_true')
    parser.add_argument('--log', help='Path to log file to save flat-text '
                        'logging output. Optional')
    # BUG FIX: a stray extra `parser.parse_args()` call was removed here; it
    # parsed sys.argv a second time and its result was discarded.
    args = parser.parse_args()
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    try:
        # Fail fast if the format string cannot take a single int.
        args.outfmt % 1
    except TypeError:
        parser.error('"%s" is not a valid string format. It should contain '
                     'a single %%d specifier' % args.outfmt)
    return args
def main():
    """Drive the conversion: walk the project tree, concatenate each leaf
    directory's chunks, save the result, and append one record per output
    file to the JSON-lines metadata file.

    Directories already present in the metadata file are skipped, so the
    script can be safely restarted after an interruption.
    """
    args = parse_args()
    if args.log is not None:
        tee_outstream_to_file(args.log)
    print_script_header()
    # Load any existing metadata so reruns skip completed directories.
    if os.path.exists(args.metadata):
        with open(args.metadata) as f:
            metadata = [json.loads(line) for line in f]
    else:
        metadata = []
    for chunk_fns in walk_project(args.root, args.pattern):
        if chunk_fns in {tuple(e['chunks']) for e in metadata}:
            print('Skipping %s. Already processed' % os.path.dirname(chunk_fns[0]))
            continue
        try:
            with timing('Loading %s: %d files' % (os.path.dirname(chunk_fns[0]), len(chunk_fns))):
                traj = load_chunks(chunk_fns, args.topology,
                                   args.stride,
                                   discard_first=args.discard_first)
        except (ValueError, RuntimeError):
            # Best-effort: report the failing directory and move on.
            print('======= Error loading chunks! Skipping ==========', file=sys.stderr)
            print('-' * 60)
            traceback.print_exc(file=sys.stderr)
            print('-' * 60)
            continue
        out_filename = args.outfmt % len(metadata)
        # BUG FIX: this set was previously built as {tuple(e['filename']) ...},
        # i.e. tuples of single characters, so the duplicate-name assert could
        # never trigger. Compare against the filename strings themselves.
        assert out_filename not in {e['filename'] for e in metadata}
        assert not os.path.exists(os.path.join(args.outdir, out_filename))
        print('Saving... ', end=' ')
        if not args.dry_run:
            traj.save(os.path.join(args.outdir, out_filename))
        print('%s: [%s]' % (out_filename, ', '.join(os.path.basename(e) for e in chunk_fns)))
        metadata_item = {'filename': out_filename, 'chunks': chunk_fns}
        metadata.append(metadata_item)
        # sync it back to disk immediately so an interruption loses no work
        if not args.dry_run:
            with open(args.metadata, 'a') as f:
                json.dump(metadata_item, f)
                f.write('\n')
    print()
    print_datetime()
    print('Finished sucessfully!')
# Script entry point.
if __name__ == '__main__':
    main()
| rmcgibbo/mixtape-scripts | mixtape_scripts/convert_chunked_project.py | Python | mit | 6,781 | [
"MDTraj"
] | 1ba4e990c8b3dba75631f9345658ab0ca73c3b34244cb80e4a4a40afb1e5662a |
from django.conf.urls import patterns, url
from rest_framework import routers
from rest.routers import HybridRouter
import api
import views
# REST router for this app; add_api_view registers a single plain API view
# (not a viewset) under the given name.
router = HybridRouter()
router.add_api_view('hot-box', url(r'^hot-box/', api.HotBoxAPIView.as_view(), name='hot-box'))
# Classic Django URL for the hit counter. NOTE(review): `ct`/`pk` look like a
# content-type id and object id -- confirm against views.visit_view.
urlpatterns = patterns('',
    url(r'^trigger-hit/(?P<ct>\d+)/(?P<pk>\d+)/', views.visit_view, name='visit-counter'),
)
| piotrek-golda/CivilHubIndependantCopy | hitcounter/urls.py | Python | gpl-3.0 | 384 | [
"VisIt"
] | fa167216bbf839d9226566702685860c6bc5d8008e57f62ccfd9187d1bf711fe |
#!/usr/bin/env python
# \author Bruno Combal, IOC-UNESCO
# \date July 2014
# removes peak values from a ncdf file
# peak definition: value >= peakVal and surrounding <= low or >= high
# This code was done for cleaning dhm_frequency datasets.
# It may requires adaptation for other kind of data
# note: to have cdms2 library correctly working
# source /usr/local/uvcdat/1.2.0/bin/setup_cdat.sh
import cdms2
import numpy
import os
import string
import sys
import collections
import gc
# __________________
def usage():
text = 'SYNOPSIS:\n\t{0} -low lowFilter -high highFilter -nodata nodata -o outputfileRoot infile*'.format(__file__)
text = text + '\tinfile0: a series netcdf files. The first one is used to detect anomalous points to be removed;\n'
text = text + '\toutputfileRoot root name for the output netcdf file. If exists, will be deleted;\n'
text = text + '\tlowFilter: low values around the single point to remove;\n'
text = text + '\thighFilter: high values around the point to remove;\n'
text = text + '\nodata: nodata value, not changed;\n'
print text
# __________________
def exitMessage(msg, exitCode='1'):
print msg
print
print usage()
sys.exit(exitCode)
# ___________________
def do_detect(infile, var, lowFilter, highFilter, nodata, outfile):
    # Scan variable `var` of the NetCDF file and return the [row, col]
    # positions of isolated peak pixels: cells equal to highFilter whose
    # eight neighbours are all nodata, <= lowFilter, or >= highFilter.
    # Border rows/columns are never inspected.
    src = cdms2.open(infile)
    grid = numpy.array(src[var][:])
    # The eight neighbour offsets around a cell (centre excluded).
    offsets = [[dy, dx] for dy in [-1, 0, 1] for dx in [-1, 0, 1] if dy or dx]
    needed = len(offsets)
    peaks = []
    for row in xrange(1, grid.shape[0] - 1):
        for col in xrange(1, grid.shape[1] - 1):
            if grid[row][col] != highFilter:
                continue
            hits = 0
            for (dy, dx) in offsets:
                value = grid[row + dy][col + dx]
                if value == nodata or value <= lowFilter or value >= highFilter:
                    hits = hits + 1
            if hits >= needed:
                peaks.append([row, col])
    src.close()
    return peaks
# ___________________
def do_filter(infile, var, pointList, outfileRoot):
pos=[]
for jj in [-1, 0, 1]:
for ii in [-1, 0, 1]:
if ii or jj:
pos.append([jj, ii])
thisFile = cdms2.open(infile)
data = None
data = thisFile[var][:].copy() # else can not write in the dataset
for (jj, ii) in pointList:
around = []
for (yy, xx) in pos: around.append(data[jj+yy][ii+xx])
values = collections.Counter(around)
data[jj][ii]=values.most_common(1)[0][0] # should work well with integers
# let's write an output
# get the file path
thisPath = os.path.dirname(infile)
# get the file name
fname = os.path.basename(infile)
# build the output: delete if exists
outfile = os.path.join(thisPath, '{0}_{1}'.format(outfileRoot, fname) )
if os.path.exists(outfile): os.remove(outfile)
# write the file
print 'writing result to ',outfile
thisOut = cdms2.open(outfile,'w')
var = cdms2.createVariable(data, id=var, grid=thisFile[var].getGrid())
thisOut.write(var)
thisOut.close()
thisFile.close()
del data
del around
gc.collect()
return
# ___________________
if __name__=="__main__":
infile=[]
outfileRoot=None
lowFilter=0
highFilter=10
nodata=-1
var=None #'lvl2_freq'
cdms2.setNetcdfShuffleFlag(1)
cdms2.setNetcdfDeflateFlag(1)
cdms2.setNetcdfDeflateLevelFlag(3)
# read input parameter
ii=1
while ii < len(sys.argv):
arg = sys.argv[ii].lower()
if arg == '-o':
ii = ii + 1
outfileRoot = sys.argv[ii]
elif arg=='-v':
ii = ii + 1
var = sys.argv[ii]
else:
infile.append(sys.argv[ii])
ii = ii + 1
# check parameters
if outfileRoot is None:
exitMessage('Missing an output file name, use option -o. Exit(2).',2)
if len(infile)==0:
exitMessage('Missing input file name(s). Exit(3).', 3)
for thisFile in infile:
if not os.path.exists(thisFile):
exitMessage('Input file does not exist. Exit(4).',4)
print 'Getting reference points: '
pointList = do_detect(infile[0], var, lowFilter, highFilter, nodata, outfileRoot)
if len(pointList)==0:
exitMessage('Found no point to correct')
print 'Found {0} points'.format(len(pointList))
# now filter all files in the series
for thisFile in infile:
do_filter(thisFile, var, pointList, outfileRoot)
# end of script
| BrunoCombal/misc | filter_peaks.py | Python | gpl-2.0 | 4,958 | [
"NetCDF"
] | a1edc507b76de6e9400b25ae107b8f577f25486f075fc8f983a98ed34a9b6e03 |
import sys, os, platform
import numpy
from PyQt5.QtWidgets import QApplication, QSizePolicy
from PyQt5 import QtGui, QtWidgets
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui, congruence
from orangecontrib.xoppy.widgets.gui.ow_xoppy_widget import XoppyWidget
import orangecanvas.resources as resources
from oasys.util.oasys_util import EmittingStream, TTYGrabber
import syned.beamline.beamline as synedb
import syned.storage_ring.magnetic_structures.insertion_device as synedid
from syned.widget.widget_decorator import WidgetDecorator
from syned.storage_ring.magnetic_structures.undulator import Undulator
from syned.storage_ring.electron_beam import ElectronBeam
import scipy.constants as codata
from silx.gui.plot import Plot2D
from orangecontrib.xoppy.util.messages import showCriticalMessage
from orangecontrib.xoppy.util.srcalc.srcalc import load_srcalc_output_file, ray_tracing
from orangecontrib.xoppy.util.srcalc.srcalc import compute_power_density_footprint, compute_power_density_image
from orangecontrib.xoppy.util.srcalc.srcalc import trapezoidal_rule_2d, trapezoidal_rule_2d_1darrays
from orangecontrib.xoppy.util.srcalc.srcalc import write_ansys_files
from orangecontrib.xoppy.util.xoppy_util import locations
from orangecontrib.xoppy.widgets.gui.image_view_with_fwhm import ImageViewWithFWHM
#
# TODO: Recompile IDPower with higher dimensions
# with better format: Pow. ref(W) Pow. abs.(W)
# Mirror 1 *********** ***********
class OWsrcalc_idpower(XoppyWidget, WidgetDecorator):
IS_DEVELOP = False if not "OASYSDEVELOP" in os.environ.keys() else str(os.environ.get('OASYSDEVELOP')) == "1"
name = "SRCALC-IDPOWER"
id = "srcalc_idpower"
description = "Power Absorbed and Transmitted by Optical Elements"
icon = "icons/srcalc.png"
priority = 1
category = ""
keywords = ["srcalc", "IDPower", "power", "Reininger", "OASYS"]
RING_ENERGY = Setting(2.0)
RING_CURRENT = Setting(0.5)
KY = Setting(3.07)
KX = Setting(0.0)
NUMBER_OF_PERIODS = Setting(137)
PERIOD_LENGTH = Setting(0.0288)
NUMBER_OF_HARMONICS = Setting(-28)
SOURCE_SCREEN_DISTANCE = Setting(15.00)
HORIZONTAL_ACCEPTANCE = Setting(30.0)
VERTICAL_ACCEPTANCE = Setting(15.0)
NUMBER_OF_POINTS_H = Setting(31)
NUMBER_OF_POINTS_V = Setting(21)
ELECTRON_SIGMAS = Setting(4)
SIGMAX = Setting(12.1e-3)
SIGMAXP = Setting(5.7e-3)
SIGMAY = Setting(14.7e-3)
SIGMAYP = Setting(4.7e-3)
NELEMENTS = Setting(1)
EL0_SHAPE = Setting(5)
EL0_P_POSITION = Setting(15.00) # this is then copied from SOURCE_SCREEN_DISTANCE
EL0_Q_POSITION = Setting(5.0)
EL0_P_FOCUS = Setting(15.0)
EL0_Q_FOCUS = Setting(5.0)
EL0_ANG = Setting(88.75)
EL0_THICKNESS = Setting(1000)
EL0_RELATIVE_TO_PREVIOUS = Setting(0)
EL0_COATING = Setting(9)
EL1_SHAPE = Setting(2)
EL1_P_POSITION = Setting(1.0)
EL1_Q_POSITION = Setting(16.0)
EL1_P_FOCUS = Setting(16.0)
EL1_Q_FOCUS = Setting(16.0)
EL1_ANG = Setting(88.75)
EL1_THICKNESS = Setting(1000)
EL1_RELATIVE_TO_PREVIOUS = Setting(2)
EL1_COATING = Setting(1)
EL2_SHAPE = Setting(2)
EL2_P_POSITION = Setting(10.0)
EL2_Q_POSITION = Setting(0.0)
EL2_P_FOCUS = Setting(10.0)
EL2_Q_FOCUS = Setting(10.0)
EL2_ANG = Setting(88.75)
EL2_THICKNESS = Setting(1000)
EL2_RELATIVE_TO_PREVIOUS = Setting(0)
EL2_COATING = Setting(9)
EL3_SHAPE = Setting(2)
EL3_P_POSITION = Setting(10.0)
EL3_Q_POSITION = Setting(0.0)
EL3_P_FOCUS = Setting(10.0)
EL3_Q_FOCUS = Setting(10.0)
EL3_ANG = Setting(88.75)
EL3_THICKNESS = Setting(1000)
EL3_RELATIVE_TO_PREVIOUS = Setting(0)
EL3_COATING = Setting(9)
EL4_SHAPE = Setting(2)
EL4_P_POSITION = Setting(10.0)
EL4_Q_POSITION = Setting(0.0)
EL4_P_FOCUS = Setting(10.0)
EL4_Q_FOCUS = Setting(10.0)
EL4_ANG = Setting(88.75)
EL4_THICKNESS = Setting(1000)
EL4_RELATIVE_TO_PREVIOUS = Setting(0)
EL4_COATING = Setting(9)
EL5_SHAPE = Setting(2)
EL5_P_POSITION = Setting(10.0)
EL5_Q_POSITION = Setting(0.0)
EL5_P_FOCUS = Setting(10.0)
EL5_Q_FOCUS = Setting(10.0)
EL5_ANG = Setting(88.75)
EL5_THICKNESS = Setting(1000)
EL5_RELATIVE_TO_PREVIOUS = Setting(0)
EL5_COATING = Setting(9)
RAY_TRACING_IMAGE = Setting(1)
RAY_TRACING_RUNS = Setting(5)
RAY_TRACING_SEED = Setting(123456)
PLOT_MODE = Setting(2)
DO_PLOT_GRID = Setting(0)
DUMP_ANSYS_FILES = Setting(0)
SHOW_URGENT_PLOTS = Setting(0) # 0 only source, 1 all
ORIENTATION_LOGIC = Setting(1) # 0=shadow, 1=Lab
INTERPOLATION_OR_HISTOGRAMMING = Setting(0) # 0 interpolation, 1 histogramming
INTERPOLATION_METHOD = Setting(2) # 0 linear, 1 nearest, 2 cubic
RATIO_PIXELS_0 = Setting(1.0)
RATIO_PIXELS_1 = Setting(1.0)
DEBUG_RUN_URGENT = Setting(0)
inputs = WidgetDecorator.syned_input_data()
def __init__(self):
super().__init__()
info_tab = oasysgui.createTabPage(self.main_tabs, "Info")
self.info_output = QtWidgets.QTextEdit()
self.info_output.setReadOnly(True)
info_tab.layout().addWidget(self.info_output)
def resetSettings(self):
pass
def build_gui(self):
self.leftWidgetPart.setSizePolicy(QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding))
self.leftWidgetPart.setMaximumWidth(self.CONTROL_AREA_WIDTH + 20)
self.leftWidgetPart.updateGeometry()
self.controls_tabs = oasysgui.tabWidget(self.controlArea)
box = oasysgui.createTabPage(self.controls_tabs, "Light Source")
idx = -1
########
idx += 1
box1 = gui.widgetBox(box)
self.id_RING_ENERGY = oasysgui.lineEdit(box1, self, "RING_ENERGY",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
self.id_RING_CURRENT = oasysgui.lineEdit(box1, self, "RING_CURRENT",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
self.id_KY = oasysgui.lineEdit(box1, self, "KY",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
self.id_KX = oasysgui.lineEdit(box1, self, "KX",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
self.id_NUMBER_OF_PERIODS = oasysgui.lineEdit(box1, self, "NUMBER_OF_PERIODS",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
self.id_PERIOD_LENGTH = oasysgui.lineEdit(box1, self, "PERIOD_LENGTH",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
self.id_SIGMAX = oasysgui.lineEdit(box1, self, "SIGMAX",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
self.id_SIGMAXP = oasysgui.lineEdit(box1, self, "SIGMAXP",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
self.id_SIGMAY = oasysgui.lineEdit(box1, self, "SIGMAY",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
self.id_SIGMAYP = oasysgui.lineEdit(box1, self, "SIGMAYP",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
##########################
box = oasysgui.createTabPage(self.controls_tabs, "Calculation")
##########################
########
idx += 1
box1 = gui.widgetBox(box, orientation="horizontal")
oasysgui.lineEdit(box1, self, "NUMBER_OF_HARMONICS",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=150)
self.show_at(self.unitFlags()[idx], box1)
gui.button(box1 , self, "Guess", callback=self.guess_number_of_harmonics, height=25)
########
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "SOURCE_SCREEN_DISTANCE",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250,
callback=self.setdistance)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "HORIZONTAL_ACCEPTANCE",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "VERTICAL_ACCEPTANCE",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "NUMBER_OF_POINTS_H",
label=self.unitLabels()[idx], addSpace=False,
valueType=int, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "NUMBER_OF_POINTS_V",
label=self.unitLabels()[idx], addSpace=False,
valueType=int, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
########
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "ELECTRON_SIGMAS",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
##########################
box = oasysgui.createTabPage(self.controls_tabs, "Beamline")
##########################
#widget index 10
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "NELEMENTS",
label=self.unitLabels()[idx], addSpace=False,
items=['0', '1', '2', '3', '4', '5','6'],
valueType=int, orientation="horizontal", callback=self.set_NELEMENTS,
labelWidth=330)
self.show_at(self.unitFlags()[idx], box1)
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
self.shape_list = [
"Toroidal mirror",
"Spherical mirror",
"Plane mirror",
"MerCyl mirror",
"SagCyl mirror",
"Ellipsoidal mirror",
"MerEll mirror",
"SagEllip mirror",
"Filter",
"Crystal"]
self.coating_list = ["Al","Au","Cr","Dia","Gra","InSb","MGF2","Ni","Pd","Rh","SiC","Test","Al2O3","Be","Cu","Fe","Ice","Ir","Mo","Os","Pt","Si","SiO2","WW","B4C"]
tabs_elements = oasysgui.tabWidget(box)
self.tab_el = []
self.tab_el.append( oasysgui.createTabPage(tabs_elements, "o.e. 1") )
self.tab_el.append( oasysgui.createTabPage(tabs_elements, "o.e. 2") )
self.tab_el.append( oasysgui.createTabPage(tabs_elements, "o.e. 3") )
self.tab_el.append( oasysgui.createTabPage(tabs_elements, "o.e. 4") )
self.tab_el.append( oasysgui.createTabPage(tabs_elements, "o.e. 5") )
self.tab_el.append( oasysgui.createTabPage(tabs_elements, "o.e. 6") )
for element_index in range(6):
#widget index xx
idx += 1
box1 = gui.widgetBox(self.tab_el[element_index])
gui.comboBox(box1, self, "EL%d_SHAPE"%element_index,
label=self.unitLabels()[idx], addSpace=False,
items=self.shape_list,
valueType=int, orientation="horizontal", callback=self.set_EL_FLAG, labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index xx
idx += 1
box1 = gui.widgetBox(self.tab_el[element_index])
oasysgui.lineEdit(box1, self, "EL%d_P_POSITION"%element_index,
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=300)
self.show_at(self.unitFlags()[idx], box1)
# first element distance is the same as urgent screen position
if element_index == 0:
box1.setEnabled(False)
#widget index xx
idx += 1
box1 = gui.widgetBox(self.tab_el[element_index])
oasysgui.lineEdit(box1, self, "EL%d_Q_POSITION"%element_index,
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=300)
self.show_at(self.unitFlags()[idx], box1)
#widget index xx
idx += 1
box1 = gui.widgetBox(self.tab_el[element_index])
oasysgui.lineEdit(box1, self, "EL%d_P_FOCUS"%element_index,
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index xx
idx += 1
box1 = gui.widgetBox(self.tab_el[element_index])
oasysgui.lineEdit(box1, self, "EL%d_Q_FOCUS"%element_index,
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index xx
idx += 1
box1 = gui.widgetBox(self.tab_el[element_index])
oasysgui.lineEdit(box1, self, "EL%d_ANG"%element_index,
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index xx
idx += 1
box1 = gui.widgetBox(self.tab_el[element_index])
oasysgui.lineEdit(box1, self, "EL%d_THICKNESS"%element_index,
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index xx
idx += 1
box1 = gui.widgetBox(self.tab_el[element_index])
gui.comboBox(box1, self, "EL%d_RELATIVE_TO_PREVIOUS"%element_index,
label=self.unitLabels()[idx], addSpace=False,
items=['Left (90)','Right (270)','Up (0)','Down (180)'],
valueType=int, orientation="horizontal", callback=self.set_EL_FLAG, labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index xx
idx += 1
box1 = gui.widgetBox(self.tab_el[element_index])
gui.comboBox(box1, self, "EL%d_COATING"%element_index,
label=self.unitLabels()[idx], addSpace=False,
items=self.coating_list,
valueType=int, orientation="horizontal", callback=self.set_EL_FLAG, labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index xx
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
gui.comboBox(box1, self, "RAY_TRACING_IMAGE",
label=self.unitLabels()[idx], addSpace=False,
items=['No', 'Yes'],
valueType=int, orientation="horizontal", labelWidth=350)
self.show_at(self.unitFlags()[idx], box1)
# widget index xx
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "RAY_TRACING_RUNS",
label=self.unitLabels()[idx], addSpace=False,
valueType=int, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
# widget index xx
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "RAY_TRACING_SEED",
label=self.unitLabels()[idx], addSpace=False,
valueType=int, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#
# Setting page
#
box0 = oasysgui.createTabPage(self.controls_tabs, "Settings")
box = gui.widgetBox(box0,"Plots")
#widget index xx
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
gui.comboBox(box1, self, "PLOT_MODE",
label=self.unitLabels()[idx], addSpace=False,
items=['Basic image', 'Image and histograms', 'Image [default]'],
valueType=int, orientation="horizontal", labelWidth=350,
callback=self.set_ViewType)
self.show_at(self.unitFlags()[idx], box1)
#widget index xx
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
gui.comboBox(box1, self, "DO_PLOT_GRID",
label=self.unitLabels()[idx], addSpace=False,
items=['No [default]', 'Yes (overplotted)', 'Yes (in a new tab)'],
valueType=int, orientation="horizontal", labelWidth=350,
callback=self.set_ViewType)
self.show_at(self.unitFlags()[idx], box1)
#widget index xx
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
gui.comboBox(box1, self, "SHOW_URGENT_PLOTS",
label=self.unitLabels()[idx], addSpace=False,
items=['Only source [default]', 'Source + elements'],
valueType=int, orientation="horizontal", labelWidth=350,)
self.show_at(self.unitFlags()[idx], box1)
box = gui.widgetBox(box0,"Files")
#widget index xx
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
gui.comboBox(box1, self, "DUMP_ANSYS_FILES",
label=self.unitLabels()[idx], addSpace=False,
items=['No [default]', 'Yes (as plotted)', 'Yes (transposed)'],
valueType=int, orientation="horizontal", labelWidth=350)
self.show_at(self.unitFlags()[idx], box1)
box = gui.widgetBox(box0,"Processing")
# widget index xx
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
gui.comboBox(box1, self, "ORIENTATION_LOGIC",
label=self.unitLabels()[idx], addSpace=False,
items=['relative to previous o.e. [like SHADOW]',
'relative to lab frame [default]'],
valueType=int, orientation="horizontal", labelWidth=125)
self.show_at(self.unitFlags()[idx], box1)
# widget index xx
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
gui.comboBox(box1, self, "INTERPOLATION_OR_HISTOGRAMMING",
label=self.unitLabels()[idx], addSpace=False,
items=['Interpolation [default]', 'Histogramming'],
valueType=int, orientation="horizontal", labelWidth=350)
self.show_at(self.unitFlags()[idx], box1)
# widget index xx
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
gui.comboBox(box1, self, "INTERPOLATION_METHOD",
label=self.unitLabels()[idx], addSpace=False,
items=['nearest', 'linear', 'cubic [default]'],
valueType=int, orientation="horizontal", labelWidth=350)
self.show_at(self.unitFlags()[idx], box1)
self.show_at(self.unitFlags()[idx], box1)
# widget index xx
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "RATIO_PIXELS_0",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
# widget index xx
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "RATIO_PIXELS_1",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
box = gui.widgetBox(box0,"Debug")
#
# widget index xx
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
gui.comboBox(box1, self, "DEBUG_RUN_URGENT",
label=self.unitLabels()[idx], addSpace=False,
items=['No [default]', 'Yes [skip running URGENT]'],
valueType=int, orientation="horizontal", labelWidth=350)
self.show_at(self.unitFlags()[idx], box1)
#
#
#
self.mirror_tabs_visibility()
def guess_number_of_harmonics(self):
syned_undulator = Undulator(
K_vertical = self.KY,
K_horizontal = self.KX,
period_length = self.PERIOD_LENGTH,
number_of_periods = self.NUMBER_OF_PERIODS)
Bx = syned_undulator.magnetic_field_horizontal()
By = syned_undulator.magnetic_field_vertical()
Ec = 665.0 * self.RING_ENERGY**2 * numpy.sqrt( Bx**2 + By**2)
E1 = syned_undulator.resonance_energy(self.gamma(), harmonic=1)
self.NUMBER_OF_HARMONICS = -(numpy.floor(numpy.abs(10*Ec/E1))+5)
# UnNh = -(numpy.floor(numpy.abs(10 * Ec / E1)) + 5) # Number of harmonics in calculation
def setdistance(self):
self.EL0_P_POSITION = self.SOURCE_SCREEN_DISTANCE
def set_NELEMENTS(self):
self.initializeTabs()
self.mirror_tabs_visibility()
def mirror_tabs_visibility(self):
for i in range(6):
if (i+1) <= self.NELEMENTS:
self.tab_el[i].setEnabled(True)
else:
self.tab_el[i].setEnabled(False)
def set_EL_FLAG(self):
self.initializeTabs()
def unitLabels(self):
labels = ["Ring energy [GeV]","Ring current [A]","Ky","Kx",
"Number of Periods","Period Length [m]",
"Sigma H [mm]", "Sigma Prime H [mrad]", "Sigma V [mm]", "Sigma Prime V [mrad]",
"Number of harmonics",
"Source to screen distance [m]","Horizontal acceptance [mm]","Vertical acceptance [mm]",
"Number of intervals in half H screen","Number of intervals in half V screen","Electron sigmas",
'Number of optical elements:']
for i in range(6):
labels = labels + [
'Type',
'Distance from previous cont. plane [m]',
'Distance to next continuation plane [m]',
'Focus Entrance Arm [m]',
'Focus Exit Arm [m]',
'Inc. Angle to normal [deg]',
'Thickness [nanometers]',
'Orientation (see Settings tab)',
'Coating',
]
labels = labels + ["Calculate power on images","Number of ray-tracing runs","Random seed (int): ",
"Plot mode","Plot ray-traced grid","Show URGENT plots",
"Write FEA/ANSYS files",
"O.E. orientation","Calculation method for images","Interpolation",
"Ratio pixels axis 0 o.e./source","Ratio pixels axis 1 o.e./source",
"Debug mode (do not run URGENT)"]
return labels
def unitFlags(self):
# labels = ["Ring energy [GeV]","Ring current [A]","Ky","Kx",
# "Number of Periods","Period Length [m]",
# "Sigma H [mm]", "Sigma Prime H [mrad]", "Sigma V [mm]", "Sigma Prime V [mrad]",
# "Number of harmonics",
# "Source to screen distance [m]","Horizontal acceptance [mm]","Vertical acceptance [mm]",
# "Number of intervals in half H screen","Number of intervals in half V screen","Electron sigmas",
# 'Number of optical elements:']
return ["True", "True", "True", "True",
"True", "True",
"True", "True", "True", "True",
"True",
"True", "True", "True",
"True", "True","True",
'True', #
"True", "True", "True", "self.EL0_SHAPE not in (2,8,9)", "self.EL0_SHAPE not in (2,8,9)", "True", "self.EL0_SHAPE in (8,9)", "True", "True", # shape, p, q, p_foc, q_foc, angle, thickness, orientation, coating
"True", "True", "True", "self.EL1_SHAPE not in (2,8,9)", "self.EL1_SHAPE not in (2,8,9)", "True", "self.EL1_SHAPE in (8,9)", "True", "True", # OE fields
"True", "True", "True", "self.EL2_SHAPE not in (2,8,9)", "self.EL2_SHAPE not in (2,8,9)", "True", "self.EL2_SHAPE in (8,9)", "True", "True", # OE fields
"True", "True", "True", "self.EL3_SHAPE not in (2,8,9)", "self.EL3_SHAPE not in (2,8,9)", "True", "self.EL3_SHAPE in (8,9)", "True", "True", # OE fields
"True", "True", "True", "self.EL4_SHAPE not in (2,8,9)", "self.EL4_SHAPE not in (2,8,9)", "True", "self.EL4_SHAPE in (8,9)", "True", "True", # OE fields
"True", "True", "True", "self.EL5_SHAPE not in (2,8,9)", "self.EL5_SHAPE not in (2,8,9)", "True", "self.EL5_SHAPE in (8,9)", "True", "True", # OE fields
'True', 'self.RAY_TRACING_IMAGE == 1', 'self.RAY_TRACING_IMAGE == 1',
'True', 'True', 'True',
'True',
'True', 'True', 'True',
'True', 'True',
'True']
# labels = labels + ["Calculate power on images", "Number of ray-tracing runs", "Random seed (int): ",
# "Plot mode", "Plot ray-traced grid", "Show URGENT plots",
# "Write SHADOW files", "Write FEA/ANSYS files",
# "Calculation method for images", "Interpolation",
# "Ratio pixels axis 0 o.e./source", "Ratio pixels axis 1 o.e./source",
# "Debug mode (do not run URGENT)"]
def get_help_name(self):
return 'srcalc-idpower'
def selectFile(self):
self.le_source_file.setText(oasysgui.selectFileFromDialog(self, self.SOURCE_FILE, "Open Source File", file_extension_filter="*.*"))
def check_fields(self):
    """Validate every numeric input through the ``congruence`` checkers.

    Each value is passed through its checker (which may coerce it) and
    written back to the same attribute.  Checks run in the same order as
    the historical field-by-field version, so the first invalid value
    raises the same error.  Optical-element fields are validated only for
    elements in use (``NELEMENTS``), scanned from EL5 down to EL0.
    """
    # (attribute, label, checker) — labels are kept byte-identical to the
    # historical messages (several carry trailing blanks).
    simple_checks = (
        ("RING_ENERGY", "RING_ENERGY ", congruence.checkPositiveNumber),
        ("RING_CURRENT", "RING_CURRENT ", congruence.checkPositiveNumber),
        ("KY", "KY ", congruence.checkPositiveNumber),
        ("KX", "KX ", congruence.checkPositiveNumber),
        ("NUMBER_OF_PERIODS", "NUMBER_OF_PERIODS", congruence.checkPositiveNumber),
        ("PERIOD_LENGTH", "PERIOD_LENGTH ", congruence.checkPositiveNumber),
        ("NUMBER_OF_HARMONICS", "NUMBER_OF_HARMONICS", congruence.checkNumber),
        ("SOURCE_SCREEN_DISTANCE", "SOURCE_SCREEN_DISTANCE", congruence.checkPositiveNumber),
        ("HORIZONTAL_ACCEPTANCE", "HORIZONTAL_ACCEPTANCE ", congruence.checkPositiveNumber),
        ("VERTICAL_ACCEPTANCE", "VERTICAL_ACCEPTANCE ", congruence.checkPositiveNumber),
        ("NUMBER_OF_POINTS_H", "NUMBER_OF_POINTS_H ", congruence.checkPositiveNumber),
        ("NUMBER_OF_POINTS_V", "NUMBER_OF_POINTS_V ", congruence.checkPositiveNumber),
        ("ELECTRON_SIGMAS", "ELECTRON_SIGMAS ", congruence.checkPositiveNumber),
        ("SIGMAX", "SIGMAX ", congruence.checkPositiveNumber),
        ("SIGMAXP", "SIGMAXP ", congruence.checkPositiveNumber),
        ("SIGMAY", "SIGMAY ", congruence.checkPositiveNumber),
        ("SIGMAYP", "SIGMAYP ", congruence.checkPositiveNumber),
    )
    for attribute, label, checker in simple_checks:
        setattr(self, attribute, checker(getattr(self, attribute), label))
    # element checks: EL5 first (needs NELEMENTS >= 6), then EL4, ..., EL0
    for element in (5, 4, 3, 2, 1, 0):
        if self.NELEMENTS >= element + 1:
            for suffix in ("P_POSITION", "Q_POSITION", "P_FOCUS", "Q_FOCUS", "ANG", "THICKNESS"):
                attribute = "EL%d_%s" % (element, suffix)
                setattr(self, attribute,
                        congruence.checkPositiveNumber(getattr(self, attribute), attribute))
def receive_syned_data(self, data):
    """Populate the machine/undulator fields from a syned ``Beamline``.

    The fields are filled (and their widgets disabled) only when ``data``
    is a ``synedb.Beamline`` whose light source carries an
    ``InsertionDevice`` magnetic structure; otherwise the widgets are
    simply re-enabled and the data is ignored.
    """
    usable = (isinstance(data, synedb.Beamline)
              and data._light_source is not None
              and isinstance(data._light_source._magnetic_structure, synedid.InsertionDevice))
    if not usable:
        self.set_enabled(True)
        # raise ValueError("Syned data not correct")
        return
    light_source = data._light_source
    self.RING_ENERGY = light_source._electron_beam._energy_in_GeV
    self.RING_CURRENT = light_source._electron_beam._current
    x, xp, y, yp = light_source._electron_beam.get_sigmas_all()
    # sigmas are scaled by 1e3 here (presumably m -> mm; the inverse 1e-3
    # scaling is applied where SIGMAX/SIGMAY are consumed)
    self.SIGMAX = x * 1e3
    self.SIGMAY = y * 1e3
    self.SIGMAXP = xp * 1e3
    self.SIGMAYP = yp * 1e3
    self.PERIOD_LENGTH = light_source._magnetic_structure._period_length
    self.NUMBER_OF_PERIODS = int(light_source._magnetic_structure._number_of_periods)
    self.KY = light_source._magnetic_structure._K_vertical
    self.KX = light_source._magnetic_structure._K_horizontal
    self.set_enabled(False)
def set_enabled(self, value):
    """Enable (``value == True``) or disable the machine-parameter widgets.

    Used to lock the fields that are driven by incoming syned data.
    """
    enabled = value == True
    for widget_name in ("RING_ENERGY", "SIGMAX", "SIGMAY", "SIGMAXP", "SIGMAYP",
                        "RING_CURRENT", "PERIOD_LENGTH", "NUMBER_OF_PERIODS",
                        "KX", "KY"):
        getattr(self, "id_" + widget_name).setEnabled(enabled)
def do_xoppy_calculation(self):
    """Run the srcalc calculation with the output view temporarily blanked.

    Remembers the currently selected tab (restored later by
    ``plot_results``), forces view_type to 0 while the calculation runs
    (odd but effective way to clean the output window), then restores the
    previous view type and returns the calculation output.
    """
    self.current_index = self.tabs.currentIndex()
    saved_view_type = self.view_type
    self.view_type = 0
    self.set_ViewType()
    result = self.xoppy_calc_srcalc()
    self.view_type = saved_view_type
    self.set_ViewType()
    return result
def extract_data_from_xoppy_output(self, calculation_output):
    """Identity pass-through: the srcalc run already returns the final dictionary."""
    return calculation_output
def plot_results(self, calculated_data, progressBarValue=70):
    """Send all result plots to the widget tabs.

    For each element index ``oe_n`` (0 = source screen) this plots the
    URGENT power-density map and, for ``oe_n > 0``, the ray-traced
    footprint/image grids and the absorbed/transmitted power-density maps.
    ``calculated_data`` is the dictionary built by ``xoppy_calc_srcalc``.
    Raises Exception when ``calculated_data`` is None.
    """
    if not self.view_type == 0:
        if calculated_data is None:
            raise Exception("Empty Data")
        index = -1  # running tab index passed to plot_data1D/plot_data2D
        for oe_n in range(self.NELEMENTS+1):
            #
            # urgent results
            #
            if oe_n == 0 or self.SHOW_URGENT_PLOTS == 1:
                totPower2 = trapezoidal_rule_2d_1darrays(calculated_data["Zlist"][oe_n])
                if oe_n == 0:
                    # source screen: physical H/V axes in mm
                    title = 'Power density [W/mm2] at %4.1f m, Integrated Power: %6.1f W' % (
                        self.SOURCE_SCREEN_DISTANCE, totPower2)
                    xtitle = 'H (urgent) [mm] (%d pixels)' % (calculated_data["X"].size)
                    ytitle = 'V (urgent) [mm] (%d pixels)' % (calculated_data["Y"].size)
                    x = calculated_data["X"]
                    y = calculated_data["Y"]
                else:
                    # downstream elements: pixel axes only
                    title = 'Power density [W/mm2] transmitted after element %d Integrated Power: %6.1f W' % (
                        oe_n, totPower2)
                    xtitle = 'H [pixels]'
                    ytitle = 'V [pixels]'
                    x = numpy.arange(calculated_data["X"].size)
                    y = numpy.arange(calculated_data["Y"].size)
                index += 1
                z = (calculated_data["Zlist"][oe_n]).copy()
                # normalize by the grid cell area to get a density
                z /= (calculated_data["X"][1] - calculated_data["X"][0]) * \
                     (calculated_data["Y"][1] - calculated_data["Y"][0])
                self.plot_data2D(z, x, y, index, 0, mode=self.PLOT_MODE,
                                 xtitle=xtitle, ytitle=ytitle, title=title)
            #
            # ray tracing results
            #
            if oe_n > 0:
                overplot_data_footprint = None
                overplot_data_image = None
                if self.DO_PLOT_GRID == 0:
                    pass
                elif self.DO_PLOT_GRID == 1:
                    # overlay the ray grid on the power-density maps (scaled by 1e3)
                    overplot_data_footprint = [
                        1e3 * calculated_data["OE_FOOTPRINT"][oe_n - 1][0, :],
                        1e3 * calculated_data["OE_FOOTPRINT"][oe_n - 1][1, :],
                        ]
                    if self.RAY_TRACING_IMAGE:
                        overplot_data_image = [
                            1e3 * calculated_data["OE_IMAGE"][oe_n - 1][1, :],
                            1e3 * calculated_data["OE_IMAGE"][oe_n - 1][0, :],
                            ]
                elif self.DO_PLOT_GRID == 2:
                    # mirror grid
                    index += 1
                    dataX = calculated_data["OE_FOOTPRINT"][oe_n-1][0, :]
                    dataY = calculated_data["OE_FOOTPRINT"][oe_n-1][1, :]
                    self.plot_data1D(1e3*dataY, 1e3*dataX, index, 0, title="footprint oe %d"%oe_n,
                                     ytitle="Y (shadow col 2) [mm]",
                                     xtitle="X (shadow col 1) [mm]")
                    if self.RAY_TRACING_IMAGE:
                        # image grid
                        index += 1
                        dataX = calculated_data["OE_IMAGE"][oe_n-1][0, :]
                        dataY = calculated_data["OE_IMAGE"][oe_n-1][1, :]
                        if self.ORIENTATION_LOGIC == 0:
                            xtitle = "X (shadow col 1) [mm]"
                            ytitle = "Z (shadow col 2) [mm]"
                        elif self.ORIENTATION_LOGIC == 1:
                            xtitle = "H [mm]"
                            ytitle = "V [mm]"
                        self.plot_data1D(1e3*dataX, 1e3*dataY, index, 0,
                                         title="image just after oe %d perp to beam"%oe_n,
                                         xtitle=xtitle, ytitle=ytitle)
                # mirror power density
                index += 1
                H = 1e3 * calculated_data["POWER_DENSITY_FOOTPRINT_H"][oe_n - 1]
                V = 1e3 * calculated_data["POWER_DENSITY_FOOTPRINT_V"][oe_n - 1]
                stepx = numpy.abs(H[1,0] - H[0,0])
                stepy = numpy.abs(V[0,1] - V[0,0])
                data2D = (calculated_data["POWER_DENSITY_FOOTPRINT"][oe_n - 1])
                totPower2 = trapezoidal_rule_2d(data2D)
                title = 'Power density [W/mm2] absorbed at element %d Integrated Power: %6.1f W' % (oe_n, totPower2)
                if self.ORIENTATION_LOGIC == 0:
                    xtitle = 'X (shadow col 1) [mm] (%d pixels)' % (H.shape[0])
                    ytitle = 'Y (shadow col 2) [mm] (%d pixels)' % (H.shape[1])
                elif self.ORIENTATION_LOGIC == 1:
                    xtitle = 'Width (perp to beam) [mm] (%d pixels)' % (H.shape[0])
                    ytitle = 'Length (along the beam) [mm] (%d pixels)' % (H.shape[1])
                self.plot_data2D(data2D / (stepx * stepy) , H[:,0], V[0,:],
                                 index, 0, mode=self.PLOT_MODE,
                                 overplot=overplot_data_footprint,
                                 xtitle=xtitle,
                                 ytitle=ytitle,
                                 title=title)
                if self.DUMP_ANSYS_FILES == 0:
                    pass
                if self.DUMP_ANSYS_FILES == 1: # as plotted
                    write_ansys_files(data2D / (stepx * stepy), H[:,0], V[0,:], oe_number=oe_n)
                elif self.DUMP_ANSYS_FILES == 2: # transposed
                    write_ansys_files(data2D.T / (stepx * stepy), V[0,:], H[:,0], oe_number=oe_n)
                if self.RAY_TRACING_IMAGE:
                    # image power density
                    index += 1
                    data2D = calculated_data["POWER_DENSITY_IMAGE"][oe_n - 1]
                    H = 1e3 * calculated_data["POWER_DENSITY_IMAGE_H"][oe_n - 1]
                    V = 1e3 * calculated_data["POWER_DENSITY_IMAGE_V"][oe_n - 1]
                    stepx = H[1,0] - H[0,0]
                    stepy = V[0,1] - V[0,0]
                    totPower2 = trapezoidal_rule_2d(data2D)
                    title = 'Power density [W/mm2] transmitted after element %d Integrated Power: %6.1f W' % (oe_n, totPower2)
                    if self.ORIENTATION_LOGIC == 0:
                        xtitle = 'X (shadow col 1) [mm] (%d pixels)' % (H.shape[0])
                        ytitle = 'Z (shadow col 3) [mm] (%d pixels)' % (H.shape[1])
                    elif self.ORIENTATION_LOGIC == 1:
                        xtitle = 'H [mm] (%d pixels)' % (H.shape[0])
                        ytitle = 'V [mm] (%d pixels)' % (H.shape[1])
                    self.plot_data2D(data2D / (stepx * stepy), H[:,0], V[0,:],
                                     index, 0, mode=self.PLOT_MODE,
                                     overplot=overplot_data_image,
                                     xtitle=xtitle,
                                     ytitle=ytitle,
                                     title=title)
                    if self.DUMP_ANSYS_FILES == 0:
                        pass
                    if self.DUMP_ANSYS_FILES == 1: # as plotted
                        write_ansys_files(data2D / (stepx * stepy), H[:, 0], V[0, :], oe_number=oe_n,
                                          is_image=True)
                    elif self.DUMP_ANSYS_FILES == 2: # transposed
                        write_ansys_files(data2D.T / (stepx * stepy), V[0, :], H[:, 0], oe_number=oe_n,
                                          is_image=True)
    # restore the tab that was selected before the run (best effort)
    # NOTE(review): placed at method level here — confirm it should also run
    # when view_type == 0 (original indentation was ambiguous).
    try:
        self.tabs.setCurrentIndex(self.current_index)
    except:
        pass
def get_data_exchange_widget_name(self):
    """Return the identifier used for OASYS data-exchange routing."""
    return "SRCALC"
def getTitles(self):
    """Return the list of plot-tab titles, matching the order in which
    ``plot_results`` creates the plots for each optical element."""
    titles = []
    for oe_n in range(self.NELEMENTS + 1):
        # the urgent map is always shown for the source (oe 0) and, when
        # SHOW_URGENT_PLOTS is on, for every element
        if oe_n == 0 or self.SHOW_URGENT_PLOTS != 0:
            titles.append("[oe %d (urgent)]" % oe_n)
        if oe_n == 0:
            continue
        if self.DO_PLOT_GRID == 2:
            titles.append("[oe %d (ray-traced mirror grid)]" % oe_n)
            if self.RAY_TRACING_IMAGE:
                titles.append("[oe %d (ray-traced image grid)]" % oe_n)
        titles.append("[oe %d (ray tracing mirror power)]" % oe_n)
        if self.RAY_TRACING_IMAGE:
            titles.append("[oe %d (ray tracing image power)]" % oe_n)
    return titles
def run_urgent(self):
    """Write the IDPower.TXT input file and run the external srcalc binary
    (URGENT-based fortran code).

    The binary reads IDPower.TXT and writes O_IDPower.TXT (text report)
    and D_IDPower.TXT (power-density data) in the run directory.  Raises
    Exception when the half-screen grid exceeds the 50-point limit.  When
    DEBUG_RUN_URGENT != 0 the binary is NOT run and output files from a
    previous run are reused.
    """
    polarization_list, polarization_inversion, polarization_info = self.get_polarization_list()
    # hard limit of the fortran code: at most 50 intervals per half screen
    if self.NUMBER_OF_POINTS_H > 50:
        showCriticalMessage("Max NUMBER_OF_POINTS_H is 50")
        raise Exception("Bad inputs")
    if self.NUMBER_OF_POINTS_V > 50:
        showCriticalMessage("Max NUMBER_OF_POINTS_V is 50")
        raise Exception("Bad inputs")
    if self.DEBUG_RUN_URGENT == 0:
        # best-effort removal of files left over from a previous run
        for file in ["IDPower.TXT","O_IDPower.TXT","D_IDPower.TXT"]:
            try:
                os.remove(os.path.join(locations.home_bin_run(),file))
            except:
                pass
        # write the fortran input file; the trailing comments mirror the
        # READ statements of the fortran source, so KEEP THE LINE ORDER
        f = open("IDPower.TXT","w")
        f.write( "%s\n"% (os.path.join(locations.home_data(), "reflect" + os.sep)) )
        f.write("%f\n" % self.KY)                    # READ(1,*) ky
        f.write("%f\n" % self.RING_ENERGY)           # READ(1,*) energy
        f.write("%f\n" % self.RING_CURRENT)          # READ(1,*) cur
        f.write("%f\n" % self.SIGMAX)                # READ(1,*) sigmx
        f.write("%f\n" % self.SIGMAY)                # READ(1,*) sigy
        f.write("%f\n" % self.SIGMAXP)               # READ(1,*) sigx1
        f.write("%f\n" % self.SIGMAYP)               # READ(1,*) sigy1
        f.write("%d\n" % self.NUMBER_OF_PERIODS)     # READ(1,*) n
        f.write("%f\n" % self.PERIOD_LENGTH)         # READ(1,*) period
        f.write("%f\n" % self.SOURCE_SCREEN_DISTANCE) # READ(1,*) d p M1
        f.write("%d\n" % self.NELEMENTS)             # READ(1,*) nMir
        #
        # BEAMLINE
        #
        f.write("%f\n" % self.EL0_ANG)               # READ(1,*) anM(1) # incidence angle
        f.write("%d\n" % self.EL0_SHAPE)             # READ(1,*) miType(1) # type
        f.write("%d\n" % self.EL0_THICKNESS)         # READ(1,*) thic(1)
        f.write("'%s'\n" % self.coating_list[self.EL0_COATING])  # READ(1,*) com(1) # coating
        f.write("'%s'\n" % polarization_list[0])
        f.write("%f\n" % self.EL1_ANG)               # READ(1,*) anM(1) # incidence angle
        f.write("%d\n" % self.EL1_SHAPE)             # READ(1,*) miType(1) # type
        f.write("%d\n" % self.EL1_THICKNESS)         # READ(1,*) thic(1)
        f.write("'%s'\n" % self.coating_list[self.EL1_COATING])  # READ(1,*) com(1) # coating
        f.write("'%s'\n" % polarization_list[1])     # READ(1,*) iPom(1) # ! Polarization or filter
        f.write("%f\n" % self.EL2_ANG)               # READ(1,*) anM(1) # incidence angle
        f.write("%d\n" % self.EL2_SHAPE)             # READ(1,*) miType(1) # type
        f.write("%d\n" % self.EL2_THICKNESS)         # READ(1,*) thic(1)
        f.write("'%s'\n" % self.coating_list[self.EL2_COATING])  # READ(1,*) com(1) # coating
        f.write("'%s'\n" % polarization_list[2])     # READ(1,*) iPom(1) # ! Polarization or filter
        f.write("%f\n" % self.EL3_ANG)               # READ(1,*) anM(1) # incidence angle
        f.write("%d\n" % self.EL3_SHAPE)             # READ(1,*) miType(1) # type
        f.write("%d\n" % self.EL3_THICKNESS)         # READ(1,*) thic(1)
        f.write("'%s'\n" % self.coating_list[self.EL3_COATING])  # READ(1,*) com(1) # coating
        f.write("'%s'\n" % polarization_list[3])     # READ(1,*) iPom(1) # ! Polarization or filter
        f.write("%f\n" % self.EL4_ANG)               # READ(1,*) anM(1) # incidence angle
        f.write("%d\n" % self.EL4_SHAPE)             # READ(1,*) miType(1) # type
        f.write("%d\n" % self.EL4_THICKNESS)         # READ(1,*) thic(1)
        f.write("'%s'\n" % self.coating_list[self.EL4_COATING])  # READ(1,*) com(1) # coating
        f.write("'%s'\n" % polarization_list[4])     # READ(1,*) iPom(1) # ! Polarization or filter
        f.write("%f\n" % self.EL5_ANG)               # READ(1,*) anM(1) # incidence angle
        f.write("%d\n" % self.EL5_SHAPE)             # READ(1,*) miType(1) # type
        f.write("%d\n" % self.EL5_THICKNESS)         # READ(1,*) thic(1)
        f.write("'%s'\n" % self.coating_list[self.EL5_COATING])  # READ(1,*) com(1) # coating
        f.write("'%s'\n" % polarization_list[5])     # READ(1,*) iPom(1) # ! Polarization or filter
        #
        # Calculation
        #
        f.write("%f\n" % self.HORIZONTAL_ACCEPTANCE) # READ(1,*) xps
        f.write("%f\n" % self.VERTICAL_ACCEPTANCE)   # READ(1,*) yps
        f.write("%d\n" % self.NUMBER_OF_POINTS_H)    # READ(1,*) nxp
        f.write("%d\n" % self.NUMBER_OF_POINTS_V)    # READ(1,*) nyp
        f.write("%d\n" % -6)                         # READ(1,*) mode
        f.write("%d\n" % self.NUMBER_OF_HARMONICS)   # READ(1,*) iharm
        f.write("%d\n" % 1)                          # READ(1,*) icalc
        f.write("%d\n" % 1)                          # READ(1,*) itype
        f.write("%d\n" % self.ELECTRON_SIGMAS)       # READ(1,*) nSig
        f.write("%d\n" % 20)                         # READ(1,*) nPhi
        f.write("%d\n" % 20)                         # READ(1,*) nAlpha
        f.write("%f\n" % 0.000000)                   # READ(1,*) dAlpha
        f.write("%d\n" % 0)                          # READ(1,*) nOmega
        f.write("%f\n" % 0.000000)                   # READ(1,*) dOmega
        f.write("%f\n" % 0.000000)                   # READ(1,*) xpc
        f.write("%f\n" % 0.000000)                   # READ(1,*) ypc
        f.write("%d\n" % 0)                          # READ(1,*) ne
        f.write("%f\n" % 0.000000)                   # READ(1,*) emin
        f.write("%f\n" % 0.000000)                   # READ(1,*) emax
        f.write("%f\n" % self.KX)                    # READ(1,*) kx
        f.write("%f\n" % 0.000000)                   # READ(1,*) phase
        f.close()
        # quote the binary path for the shell (different quoting per platform)
        if platform.system() == "Windows":
            command = "\"" + os.path.join(locations.home_bin(), 'srcalc') + "\""
        else:
            command = "'" + os.path.join(locations.home_bin(), 'srcalc') + "'"
        print("Running command '%s' in directory: %s "%(command, locations.home_bin_run()))
        print("\n--------------------------------------------------------\n")
        os.system(command)
        print("\n--------------------------------------------------------\n")
    else:
        print("\n--------------------------------------------------------\n")
        print("\n ************ WARNING *****************************\n")
        print("\n --- DEBUGGING: URGENT NOT RUN - REUSED LAST FILES ---\n")
        print("\n--------------------------------------------------------\n")
def xoppy_calc_srcalc(self):
    """Run the full srcalc calculation chain and return the result dictionary.

    Steps: run the URGENT-based fortran code (``run_urgent``), show its text
    report, load D_IDPower.TXT, then ray-trace the beamline to add footprint
    power-density maps and (optionally) image power-density maps to the same
    dictionary.
    """
    self.progressBarInit()
    self.progressBarSet(2)
    # route stdout to the widget's text panel for the duration of the run
    sys.stdout = EmittingStream(textWritten=self.writeStdOut)
    grabber = TTYGrabber()
    grabber.start()
    # run fortran code (urgent-based code)
    # self.progressBarSet(5)
    self.run_urgent()
    grabber.stop()
    for row in grabber.ttyData:
        self.writeStdOut(row)
    # self.progressBarSet(60)
    #
    # display some info
    #
    # NOTE(review): txt0 is never used below (and misspells "IDPower") —
    # candidate for removal.
    txt0 = "3\n# Info from IDPoser/Urgent\n#\n"
    f = open("O_IDPower.TXT",'r')
    txt = f.read()
    f.close()
    txt2 = self.info_undulator()
    txt3 = self.info_distances()
    polarization_list, polarization_inversion, polarization_info = self.get_polarization_list() # (TO BE DELETED??)
    self.info_output.setText("\n\n\n#\n# Info from IDPower/Urgent\n#\n" + txt + \
                             "\n\n\n#\n# Additional Info from undulator source\n#\n" + txt2 + \
                             "\n\n\n#\n# Additional Info o.e. distances\n#\n\n" + txt3 + \
                             "\n\n\n#\n# Additional Info o.e. polarization\n#\n\n" + polarization_info)
    #
    # load results from file created by fortran
    #
    out_dictionary = load_srcalc_output_file(filename="D_IDPower.TXT")
    #
    # do additional calculations (ray-tracing and power density maps)
    # Note that the results of these two calculations are added to out_dictionary
    #
    #
    # do the ray tracing
    #
    oe_parameters = {
        "EL0_SHAPE": self.EL0_SHAPE,
        "EL0_P_POSITION": self.EL0_P_POSITION,
        "EL0_Q_POSITION": self.EL0_Q_POSITION,
        "EL0_P_FOCUS": self.EL0_P_FOCUS,
        "EL0_Q_FOCUS": self.EL0_Q_FOCUS,
        "EL0_ANG": self.EL0_ANG,
        "EL0_THICKNESS": self.EL0_THICKNESS,
        "EL0_RELATIVE_TO_PREVIOUS": self.EL0_RELATIVE_TO_PREVIOUS,
        "EL1_SHAPE": self.EL1_SHAPE,
        "EL1_P_POSITION": self.EL1_P_POSITION,
        "EL1_Q_POSITION": self.EL1_Q_POSITION,
        "EL1_P_FOCUS": self.EL1_P_FOCUS,
        "EL1_Q_FOCUS": self.EL1_Q_FOCUS,
        "EL1_ANG": self.EL1_ANG,
        "EL1_THICKNESS": self.EL1_THICKNESS,
        "EL1_RELATIVE_TO_PREVIOUS": self.EL1_RELATIVE_TO_PREVIOUS,
        "EL2_SHAPE": self.EL2_SHAPE,
        "EL2_P_POSITION": self.EL2_P_POSITION,
        "EL2_Q_POSITION": self.EL2_Q_POSITION,
        "EL2_P_FOCUS": self.EL2_P_FOCUS,
        "EL2_Q_FOCUS": self.EL2_Q_FOCUS,
        "EL2_ANG": self.EL2_ANG,
        "EL2_THICKNESS": self.EL2_THICKNESS,
        "EL2_RELATIVE_TO_PREVIOUS": self.EL2_RELATIVE_TO_PREVIOUS,
        "EL3_SHAPE": self.EL3_SHAPE,
        "EL3_P_POSITION": self.EL3_P_POSITION,
        "EL3_Q_POSITION": self.EL3_Q_POSITION,
        "EL3_P_FOCUS": self.EL3_P_FOCUS,
        "EL3_Q_FOCUS": self.EL3_Q_FOCUS,
        "EL3_ANG": self.EL3_ANG,
        "EL3_THICKNESS": self.EL3_THICKNESS,
        "EL3_RELATIVE_TO_PREVIOUS": self.EL3_RELATIVE_TO_PREVIOUS,
        "EL4_SHAPE": self.EL4_SHAPE,
        "EL4_P_POSITION": self.EL4_P_POSITION,
        "EL4_Q_POSITION": self.EL4_Q_POSITION,
        "EL4_P_FOCUS": self.EL4_P_FOCUS,
        "EL4_Q_FOCUS": self.EL4_Q_FOCUS,
        "EL4_ANG": self.EL4_ANG,
        "EL4_THICKNESS": self.EL4_THICKNESS,
        "EL4_RELATIVE_TO_PREVIOUS": self.EL4_RELATIVE_TO_PREVIOUS,
        "EL5_SHAPE": self.EL5_SHAPE,
        "EL5_P_POSITION": self.EL5_P_POSITION,
        "EL5_Q_POSITION": self.EL5_Q_POSITION,
        "EL5_P_FOCUS": self.EL5_P_FOCUS,
        "EL5_Q_FOCUS": self.EL5_Q_FOCUS,
        "EL5_ANG": self.EL5_ANG,
        "EL5_THICKNESS": self.EL5_THICKNESS,
        "EL5_RELATIVE_TO_PREVIOUS": self.EL5_RELATIVE_TO_PREVIOUS,
        }
    self.progressBarSet(45)
    # first run for mirror footprint
    out_dictionary = ray_tracing(out_dictionary,
                                 SOURCE_SCREEN_DISTANCE=self.SOURCE_SCREEN_DISTANCE,
                                 number_of_elements=self.NELEMENTS,
                                 oe_parameters=oe_parameters,
                                 real_space_shuffle=[0, 0, 0],
                                 store_footprint=True,
                                 store_image=False,
                                 accumulate_results=False,
                                 run_index=None,
                                 undo_shadow_orientation_angle_rotation=self.ORIENTATION_LOGIC,
                                 )
    # only the flip flags (second return value) are needed here
    tmp, flip_pixels_number, tmp = self.get_polarization_list()
    out_dictionary = compute_power_density_footprint(out_dictionary,
                                                     interpolation_method=self.INTERPOLATION_METHOD,
                                                     ratio_pixels_0=self.RATIO_PIXELS_0,
                                                     ratio_pixels_1=self.RATIO_PIXELS_1,
                                                     flip_pixels_number=flip_pixels_number)
    if self.RAY_TRACING_IMAGE == 1:
        numpy.random.seed(self.RAY_TRACING_SEED)
        if self.ORIENTATION_LOGIC == 1:
            flip_pixels_number = [0] * 6
        for i in range(self.RAY_TRACING_RUNS):
            # random start position: longitudinal within the undulator,
            # transverse sampled from the e-beam sigmas (mm -> m via 1e-3)
            depth = (numpy.random.random() - 0.5) * self.NUMBER_OF_PERIODS * self.PERIOD_LENGTH
            real_space_shuffle = [
                1e-3 * self.SIGMAX * numpy.random.randn(),
                depth,
                1e-3 * self.SIGMAY * numpy.random.randn(),
                ]
            print("\n\n\n\n******************** STARTING RUN INDEX %d WITH INITIAL CONDITIONS [x,y,z] ***********: "%i,real_space_shuffle)
            out_dictionary = ray_tracing(out_dictionary,
                                         SOURCE_SCREEN_DISTANCE=self.SOURCE_SCREEN_DISTANCE,
                                         number_of_elements=self.NELEMENTS,
                                         oe_parameters=oe_parameters,
                                         real_space_shuffle=real_space_shuffle,
                                         store_footprint=False,
                                         store_image=True,
                                         accumulate_results=True,
                                         run_index=i,
                                         undo_shadow_orientation_angle_rotation=self.ORIENTATION_LOGIC,
                                         )
        #
        # calculate power density maps and add results to the dictionaire
        #
        out_dictionary = compute_power_density_image(out_dictionary,
                                                     interpolation_or_histogramming=self.INTERPOLATION_OR_HISTOGRAMMING,
                                                     interpolation_method=self.INTERPOLATION_METHOD,
                                                     ratio_pixels_0=self.RATIO_PIXELS_0,
                                                     ratio_pixels_1=self.RATIO_PIXELS_1,
                                                     flip_pixels_number=flip_pixels_number)
    return out_dictionary
#
# overwritten methods
#
def plot_data1D(self, dataX, dataY, tabs_canvas_index, plot_canvas_index, title="", xtitle="", ytitle=""):
    """Replace the content of a tab with a scatter-style 1D plot of dataY vs dataX."""
    layout = self.tab[tabs_canvas_index].layout()
    layout.removeItem(layout.itemAt(0))
    canvas = oasysgui.plotWindow()
    self.plot_canvas[plot_canvas_index] = canvas
    canvas.addCurve(dataX, dataY, symbol=',', linestyle=' ')
    canvas.resetZoom()
    canvas.setXAxisAutoScale(True)
    canvas.setYAxisAutoScale(True)
    canvas.setGraphGrid(False)
    canvas.setXAxisLogarithmic(False)
    canvas.setYAxisLogarithmic(False)
    canvas.setGraphXLabel(xtitle)
    canvas.setGraphYLabel(ytitle)
    canvas.setGraphTitle(title)
    layout.addWidget(canvas)
def plot_data2D(self, data2D, dataX, dataY, tabs_canvas_index, plot_canvas_index,
                title="", xtitle="", ytitle="", mode=1, overplot = None):
    """Plot a 2D map in the given tab using one of three backends.

    mode 0: static matplotlib figure; mode 1: ImageViewWithFWHM;
    mode 2: silx Plot2D with an optional scatter ``overplot``
    ([x, y] arrays drawn on top of the image).
    """
    # clear the target tab layout
    for i in range(1+self.tab[tabs_canvas_index].layout().count()):
        self.tab[tabs_canvas_index].layout().removeItem(self.tab[tabs_canvas_index].layout().itemAt(i))
    origin = (dataX[0],dataY[0])
    scale = (dataX[1]-dataX[0],dataY[1]-dataY[0])
    colormap = {"name":"temperature", "normalization":"linear", "autoscale":True, "vmin":0, "vmax":0, "colors":256}
    if mode == 0:
        data_to_plot = data2D
        from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
        from srxraylib.plot.gol import plot_image
        f = plot_image(data_to_plot,
                       dataX,
                       dataY,
                       xtitle=xtitle,
                       ytitle=ytitle,
                       title=title,
                       show=False,
                       aspect='auto')
        figure = FigureCanvas(f[0])
        self.plot_canvas[plot_canvas_index] = figure
    elif mode == 1:
        data_to_plot = data2D
        self.plot_canvas[plot_canvas_index] = ImageViewWithFWHM() # Plot2D()
        self.plot_canvas[plot_canvas_index].plot_2D(data_to_plot,
                                                    dataX, dataY, factor1=1e0, factor2=1e0,
                                                    title=title, xtitle=xtitle, ytitle=ytitle, xum="[mm]", yum="[mm]",
                                                    colormap=colormap)
        self.tab[tabs_canvas_index].layout().addWidget(self.plot_canvas[plot_canvas_index])
    elif mode == 2:
        # transposed for silx addImage — assumes data2D is indexed (x, y);
        # TODO confirm
        data_to_plot = data2D.T
        self.plot_canvas[plot_canvas_index] = Plot2D()
        self.plot_canvas[plot_canvas_index].resetZoom()
        self.plot_canvas[plot_canvas_index].setXAxisAutoScale(True)
        self.plot_canvas[plot_canvas_index].setYAxisAutoScale(True)
        self.plot_canvas[plot_canvas_index].setGraphGrid(False)
        self.plot_canvas[plot_canvas_index].setKeepDataAspectRatio(True)
        self.plot_canvas[plot_canvas_index].yAxisInvertedAction.setVisible(False)
        self.plot_canvas[plot_canvas_index].setXAxisLogarithmic(False)
        self.plot_canvas[plot_canvas_index].setYAxisLogarithmic(False)
        #silx 0.4.0
        self.plot_canvas[plot_canvas_index].getMaskAction().setVisible(False)
        self.plot_canvas[plot_canvas_index].getRoiAction().setVisible(False)
        self.plot_canvas[plot_canvas_index].getColormapAction().setVisible(True)
        self.plot_canvas[plot_canvas_index].setKeepDataAspectRatio(False)
        self.plot_canvas[plot_canvas_index].addImage(numpy.array(data_to_plot),
                                                     legend="zio billy",
                                                     scale=scale,
                                                     origin=origin,
                                                     colormap=colormap,
                                                     replace=True)
        self.plot_canvas[plot_canvas_index].setActiveImage("zio billy")
        if overplot is not None:
            # scatter values are set above the image maximum so the grid
            # points remain visible on top of the map
            self.plot_canvas[plot_canvas_index].addScatter(overplot[1],overplot[0],overplot[1]*0+data_to_plot.max()*2,
                                                           legend="tio pepe",
                                                           colormap={"name":"gray", "normalization":"linear", "autoscale":True, "vmin":0, "vmax":0, "colors":256},
                                                           symbol='.')
        self.plot_canvas[plot_canvas_index].setGraphXLabel(xtitle)
        self.plot_canvas[plot_canvas_index].setGraphYLabel(ytitle)
        self.plot_canvas[plot_canvas_index].setGraphTitle(title)
    # NOTE(review): original indentation was ambiguous here — for mode == 1
    # the widget was already added above; confirm this method-level
    # addWidget placement is intended.
    self.tab[tabs_canvas_index].layout().addWidget(self.plot_canvas[plot_canvas_index])
def gamma(self):
    """Return the relativistic Lorentz factor for RING_ENERGY (given in GeV)."""
    electron_rest_energy_eV = codata.m_e * codata.c ** 2 / codata.e
    return 1e9 * self.RING_ENERGY / electron_rest_energy_eV
def info_undulator(self):
    """Build the undulator-info text: fill info_template() with values
    computed from the widget fields via syned ElectronBeam/Undulator
    helpers.  Sigmas are converted from the widget's mm units to m (1e-3).
    """
    syned_electron_beam = ElectronBeam(
        energy_in_GeV=self.RING_ENERGY,
        energy_spread=0.0,
        current=self.RING_CURRENT,
        number_of_bunches=400,
        moment_xx=(1e-3*self.SIGMAX)**2,
        moment_xxp=0.0,
        moment_xpxp=(1e-3*self.SIGMAXP)**2,
        moment_yy=(1e-3*self.SIGMAY)**2,
        moment_yyp=0.0,
        moment_ypyp=(1e-3*self.SIGMAYP)**2,)
    syned_undulator = Undulator(
        K_vertical=self.KY,
        K_horizontal=self.KX,
        period_length=self.PERIOD_LENGTH,
        number_of_periods=self.NUMBER_OF_PERIODS)
    gamma = self.gamma()
    Bx = syned_undulator.magnetic_field_horizontal()
    By = syned_undulator.magnetic_field_vertical()
    # critical energy from 665 * E[GeV]^2 * B[T] (presumably eV — confirm)
    Ec = 665.0 * self.RING_ENERGY**2 * numpy.sqrt( Bx**2 + By**2)
    # U_powerD = 10.84 * U_M_field_m * Energy ^ 4 * Current * U_Length * 100 / U_period
    # U_powerD = 10.84 * numpy.sqrt( Bx**2 + By**2) * self.RING_ENERGY ** 4 * self.RING_CURRENT * self.NUMBER_OF_PERIODS
    # Power Density[W / mrad2] = 116.18 * (Ee[GeV]) **4 * I[A] * N * K * G(K) / P[mm]
    U_powerD = 116.18 * self.RING_ENERGY **4 * self.RING_CURRENT * self.NUMBER_OF_PERIODS * self.KY * 1.0 / (1e3 * self.PERIOD_LENGTH)
    # values below are pre-formatted strings consumed by info_template()
    info_parameters = {
        "electron_energy_in_GeV": self.RING_ENERGY,
        "gamma": "%8.3f" % gamma,
        "ring_current": "%4.3f " % syned_electron_beam.current(),
        "K_horizontal": syned_undulator.K_horizontal(),
        "K_vertical": syned_undulator.K_vertical(),
        "period_length": syned_undulator.period_length(),
        "number_of_periods": syned_undulator.number_of_periods(),
        "undulator_length": syned_undulator.length(),
        "critical_energy": "%6.3f" % Ec,
        "resonance_energy": "%6.3f" % syned_undulator.resonance_energy(gamma, harmonic=1),
        "resonance_energy3": "%6.3f" % syned_undulator.resonance_energy(gamma, harmonic=3),
        "resonance_energy5": "%6.3f" % syned_undulator.resonance_energy(gamma, harmonic=5),
        "B_horizontal": "%4.2F" % syned_undulator.magnetic_field_horizontal(),
        "B_vertical": "%4.2F" % syned_undulator.magnetic_field_vertical(),
        "cc_1": "%4.2f" % (1e6 * syned_undulator.gaussian_central_cone_aperture(gamma, 1)),
        "cc_3": "%4.2f" % (1e6 * syned_undulator.gaussian_central_cone_aperture(gamma, 3)),
        "cc_5": "%4.2f" % (1e6 * syned_undulator.gaussian_central_cone_aperture(gamma, 5)),
        # "cc_7": "%4.2f" % (self.gaussian_central_cone_aperture(7)*1e6),
        "sigma_rad": "%5.2f" % (1e6 * syned_undulator.get_sigmas_radiation(gamma, harmonic=1)[0]),
        "sigma_rad_prime": "%5.2f" % (1e6 * syned_undulator.get_sigmas_radiation(gamma, harmonic=1)[1]),
        "sigma_rad3": "%5.2f" % (1e6 * syned_undulator.get_sigmas_radiation(gamma, harmonic=3)[0]),
        "sigma_rad_prime3": "%5.2f" % (1e6 * syned_undulator.get_sigmas_radiation(gamma, harmonic=3)[1]),
        "sigma_rad5": "%5.2f" % (1e6 * syned_undulator.get_sigmas_radiation(gamma, harmonic=5)[0]),
        "sigma_rad_prime5": "%5.2f" % (1e6 * syned_undulator.get_sigmas_radiation(gamma, harmonic=5)[1]),
        "first_ring_1": "%5.2f" % (1e6 * syned_undulator.get_resonance_ring(gamma, harmonic=1, ring_order=1)),
        "first_ring_3": "%5.2f" % (1e6 * syned_undulator.get_resonance_ring(gamma, harmonic=3, ring_order=1)),
        "first_ring_5": "%5.2f" % (1e6 * syned_undulator.get_resonance_ring(gamma, harmonic=5, ring_order=1)),
        "Sx": "%5.2f" % (1e6 * syned_undulator.get_photon_sizes_and_divergences(syned_electron_beam)[0]),
        "Sy": "%5.2f" % (1e6 * syned_undulator.get_photon_sizes_and_divergences(syned_electron_beam)[1]),
        "Sxp": "%5.2f" % (1e6 * syned_undulator.get_photon_sizes_and_divergences(syned_electron_beam)[2]),
        "Syp": "%5.2f" % (1e6 * syned_undulator.get_photon_sizes_and_divergences(syned_electron_beam)[3]),
        "und_power": "%5.2f" % syned_undulator.undulator_full_emitted_power(gamma, syned_electron_beam.current()),
        "und_power_density": "%5.2f" % U_powerD,
        "CF_h": "%4.3f" % syned_undulator.approximated_coherent_fraction_horizontal(syned_electron_beam,
                                                                                    harmonic=1),
        "CF_v": "%4.3f" % syned_undulator.approximated_coherent_fraction_vertical(syned_electron_beam, harmonic=1),
        "CF": "%4.3f" % syned_undulator.approximated_coherent_fraction(syned_electron_beam, harmonic=1),
        }
    return self.info_template().format_map(info_parameters)
def info_distances(self):
    """Return a text table summarizing the p/q distances of the beamline.

    For each of the ``NELEMENTS`` optical elements in use the row shows:
    element number, shape name (from ``self.shape_list``), p, q, the
    cumulative source-to-element distance and the cumulative
    source-to-screen distance (all in m).

    Fix: removed the dead ``txt_2`` header ("ELLIPTICAL ELEMENTS" table)
    that was built but never filled in nor returned.
    """
    txt = ' ******** SUMMARY OF DISTANCES ********\n'
    txt += ' ** DISTANCES FOR ALL O.E. [m] ** \n\n'
    txt += "%4s %20s %8s %8s %8s %8s \n" % ('OE#', 'TYPE', 'p [m]', 'q [m]', 'src-oe', 'src-screen')
    txt += '----------------------------------------------------------------------\n'
    P = [self.EL0_P_POSITION, self.EL1_P_POSITION, self.EL2_P_POSITION, self.EL3_P_POSITION, self.EL4_P_POSITION,
         self.EL5_P_POSITION,]
    Q = [self.EL0_Q_POSITION, self.EL1_Q_POSITION, self.EL2_Q_POSITION, self.EL3_Q_POSITION, self.EL4_Q_POSITION,
         self.EL5_Q_POSITION, ]
    SHAPE_INDEX = [self.EL0_SHAPE, self.EL1_SHAPE, self.EL2_SHAPE, self.EL3_SHAPE, self.EL4_SHAPE, self.EL5_SHAPE,]
    oe = 0
    final_screen_to_source = 0.0
    for i in range(self.NELEMENTS):
        oe += 1
        p = P[i]
        q = Q[i]
        shape_index = SHAPE_INDEX[i]
        # distances accumulate along the beam: source -> oe -> its screen
        final_screen_to_source = final_screen_to_source + p + q
        oe_to_source = final_screen_to_source - q
        txt += "%4d %20s %8.4f %8.4f %8.4f %8.4f \n" % (oe, self.shape_list[shape_index], p, q, oe_to_source, final_screen_to_source)
    return txt
def info_template(self):
return \
"""
================ input parameters ===========
Electron beam energy [GeV]: {electron_energy_in_GeV}
Electron current: {ring_current}
Period Length [m]: {period_length}
Number of Periods: {number_of_periods}
Horizontal K: {K_horizontal}
Vertical K: {K_vertical}
==============================================
Electron beam gamma: {gamma}
Undulator Length [m]: {undulator_length}
Horizontal Peak Magnetic field [T]: {B_horizontal}
Vertical Peak Magnetic field [T]: {B_vertical}
Total power radiated by the undulator [W]: {und_power}
Power density at center of beam (if Kx=0) [W/mrad2]: {und_power_density}
Resonances:
Photon energy [eV]:
for harmonic 1 : {resonance_energy}
for harmonic 3 : {resonance_energy3}
for harmonic 3 : {resonance_energy5}
Critical energy: {critical_energy}
Central cone (RMS urad):
for harmonic 1 : {cc_1}
for harmonic 3 : {cc_3}
for harmonic 5 : {cc_5}
First ring at (urad):
for harmonic 1 : {first_ring_1}
for harmonic 3 : {first_ring_3}
for harmonic 5 : {first_ring_5}
Sizes and divergences of radiation :
at 1st harmonic: sigma: {sigma_rad} um, sigma': {sigma_rad_prime} urad
at 3rd harmonic: sigma: {sigma_rad3} um, sigma': {sigma_rad_prime3} urad
at 5th harmonic: sigma: {sigma_rad5} um, sigma': {sigma_rad_prime5} urad
Sizes and divergences of photon source (convolution) at resonance (1st harmonic): :
Sx: {Sx} um
Sy: {Sy} um,
Sx': {Sxp} urad
Sy': {Syp} urad
Approximated coherent fraction at 1st harmonic:
Horizontal: {CF_h} Vertical: {CF_v}
Coherent fraction 2D (HxV): {CF}
"""
def get_polarization_list(self):
if self.ORIENTATION_LOGIC == 0:
return self.get_polarization_list_shadow()
else:
return self.get_polarization_list_lab()
def get_polarization_list_shadow(self):
KY = self.KY
KX = self.KX
EL0_RELATIVE_TO_PREVIOUS = self.EL0_RELATIVE_TO_PREVIOUS
EL1_RELATIVE_TO_PREVIOUS = self.EL1_RELATIVE_TO_PREVIOUS
EL2_RELATIVE_TO_PREVIOUS = self.EL2_RELATIVE_TO_PREVIOUS
EL3_RELATIVE_TO_PREVIOUS = self.EL3_RELATIVE_TO_PREVIOUS
EL4_RELATIVE_TO_PREVIOUS = self.EL4_RELATIVE_TO_PREVIOUS
EL5_RELATIVE_TO_PREVIOUS = self.EL5_RELATIVE_TO_PREVIOUS
#
#
#
SP = ['s', 'p']
if KX != 0 and KY != 0:
source_pol = 0
txt = "Polarization at the source: f"
else:
if KX == 0:
source_pol = 0 # s
else:
source_pol = 1 # p
txt = "Polarization at the source: %s" % (SP[source_pol])
txt += "\nNumber of optical elements: %d" % self.NELEMENTS
RR = ['Left (90)', 'Right (270)', 'Up (0)', 'Down (180)']
RELATIVE_TO_PREVIOUS = [
RR[EL0_RELATIVE_TO_PREVIOUS],
RR[EL1_RELATIVE_TO_PREVIOUS],
RR[EL2_RELATIVE_TO_PREVIOUS],
RR[EL3_RELATIVE_TO_PREVIOUS],
RR[EL4_RELATIVE_TO_PREVIOUS],
RR[EL5_RELATIVE_TO_PREVIOUS], ]
# items = ['Left', 'Right', 'Up', 'Down'],
FLAG_PERPENDICULAR_TO_PREVIOUS = [
EL0_RELATIVE_TO_PREVIOUS < 2,
EL1_RELATIVE_TO_PREVIOUS < 2,
EL2_RELATIVE_TO_PREVIOUS < 2,
EL3_RELATIVE_TO_PREVIOUS < 2,
EL4_RELATIVE_TO_PREVIOUS < 2,
EL5_RELATIVE_TO_PREVIOUS < 2, ]
# s=0, p=1
txt += "\nRELATIVE_TO_PREVIOUS: " + str(RELATIVE_TO_PREVIOUS[0:self.NELEMENTS])[1:-1]
txt += "\nFLAG_PERPENDICULAR_TO_PREVIOUS: " + str(FLAG_PERPENDICULAR_TO_PREVIOUS[0:self.NELEMENTS])[1:-1]
NUMBER_OF_INVERSIONS = [0, 0, 0, 0, 0, 0]
for i in range(6):
if i == 0:
if FLAG_PERPENDICULAR_TO_PREVIOUS[0]:
NUMBER_OF_INVERSIONS[0] += 1
else:
if FLAG_PERPENDICULAR_TO_PREVIOUS[i]:
NUMBER_OF_INVERSIONS[i] += 1
NUMBER_OF_INVERSIONS[i] += NUMBER_OF_INVERSIONS[i - 1]
txt += "\nNUMBER_OF_INVERSIONS: " + str(NUMBER_OF_INVERSIONS[0:self.NELEMENTS])[1:-1]
NUMBER_OF_INVERSIONS_MODULO_2 = [0, 0, 0, 0, 0, 0]
for i in range(6):
NUMBER_OF_INVERSIONS_MODULO_2[i] = numpy.mod(NUMBER_OF_INVERSIONS[i], 2)
txt += "\nNUMBER_OF_INVERSIONS_MODULO_2: " + str(NUMBER_OF_INVERSIONS_MODULO_2[0:self.NELEMENTS])[1:-1]
OUTPUT_LIST = []
for i in range(6):
OUTPUT_LIST.append(SP[numpy.mod(NUMBER_OF_INVERSIONS_MODULO_2[i] + source_pol, 2)])
if (KX != 0 and KY != 0):
OUTPUT_LIST = ['f'] * 6
txt += "\nOUTPUT_LIST: " + str(OUTPUT_LIST[0:self.NELEMENTS])[1:-1]
txt += "\n"
return OUTPUT_LIST, NUMBER_OF_INVERSIONS_MODULO_2, txt
def get_polarization_list_lab(self):
KY = self.KY
KX = self.KX
EL0_RELATIVE_TO_PREVIOUS = self.EL0_RELATIVE_TO_PREVIOUS
EL1_RELATIVE_TO_PREVIOUS = self.EL1_RELATIVE_TO_PREVIOUS
EL2_RELATIVE_TO_PREVIOUS = self.EL2_RELATIVE_TO_PREVIOUS
EL3_RELATIVE_TO_PREVIOUS = self.EL3_RELATIVE_TO_PREVIOUS
EL4_RELATIVE_TO_PREVIOUS = self.EL4_RELATIVE_TO_PREVIOUS
EL5_RELATIVE_TO_PREVIOUS = self.EL5_RELATIVE_TO_PREVIOUS
#
#
#
SP = ['s', 'p']
if KX != 0 and KY != 0:
source_pol = 0
txt = "Polarization at the source: f"
else:
if KX == 0:
source_pol = 0 # s
else:
source_pol = 1 # p
txt = "Polarization at the source: %s" % (SP[source_pol])
txt += "\nNumber of optical elements: %d" % self.NELEMENTS
RR = ['Left (90)', 'Right (270)', 'Up (0)', 'Down (180)']
RELATIVE_TO_PREVIOUS = [
RR[EL0_RELATIVE_TO_PREVIOUS],
RR[EL1_RELATIVE_TO_PREVIOUS],
RR[EL2_RELATIVE_TO_PREVIOUS],
RR[EL3_RELATIVE_TO_PREVIOUS],
RR[EL4_RELATIVE_TO_PREVIOUS],
RR[EL5_RELATIVE_TO_PREVIOUS], ]
# items = ['Left', 'Right', 'Up', 'Down'],
FLAG_PERPENDICULAR_TO_PREVIOUS = [
EL0_RELATIVE_TO_PREVIOUS < 2,
EL1_RELATIVE_TO_PREVIOUS < 2,
EL2_RELATIVE_TO_PREVIOUS < 2,
EL3_RELATIVE_TO_PREVIOUS < 2,
EL4_RELATIVE_TO_PREVIOUS < 2,
EL5_RELATIVE_TO_PREVIOUS < 2, ]
# s=0, p=1
txt += "\nORIENTATION: " + str(RELATIVE_TO_PREVIOUS[0:self.NELEMENTS])[1:-1]
txt += "\nFLAG_PERPENDICULAR: " + str(FLAG_PERPENDICULAR_TO_PREVIOUS[0:self.NELEMENTS])[1:-1]
NUMBER_OF_INVERSIONS = [
int(FLAG_PERPENDICULAR_TO_PREVIOUS[0]),
int(FLAG_PERPENDICULAR_TO_PREVIOUS[1]),
int(FLAG_PERPENDICULAR_TO_PREVIOUS[2]),
int(FLAG_PERPENDICULAR_TO_PREVIOUS[3]),
int(FLAG_PERPENDICULAR_TO_PREVIOUS[4]),
int(FLAG_PERPENDICULAR_TO_PREVIOUS[5]) ]
txt += "\nNUMBER_OF_INVERSIONS: " + str(NUMBER_OF_INVERSIONS[0:self.NELEMENTS])[1:-1]
NUMBER_OF_INVERSIONS_MODULO_2 = NUMBER_OF_INVERSIONS
txt += "\nNUMBER_OF_INVERSIONS_MODULO_2: " + str(NUMBER_OF_INVERSIONS_MODULO_2[0:self.NELEMENTS])[1:-1]
OUTPUT_LIST = []
for i in range(6):
OUTPUT_LIST.append(SP[numpy.mod(NUMBER_OF_INVERSIONS_MODULO_2[i] + source_pol, 2)] )
if (KX != 0 and KY != 0):
OUTPUT_LIST = ['f'] * 6
txt += "\nOUTPUT_LIST: " + str(OUTPUT_LIST[0:self.NELEMENTS])[1:-1]
txt += "\n"
return OUTPUT_LIST, NUMBER_OF_INVERSIONS_MODULO_2, txt
def help1(self):
import os
from orangecontrib.xoppy.util.text_window import TextWindow
# from orangecontrib.xoppy.util.xoppy_util import locations
home_doc = locations.home_doc()
filename1 = os.path.join(home_doc, self.get_help_name() + '.txt')
TextWindow(file=filename1,parent=self)
if __name__ == "__main__":
app = QApplication(sys.argv)
w = OWsrcalc_idpower()
w.DEBUG_RUN_URGENT = 1
w.show()
app.exec()
w.saveSettings()
# import Shadow
# beam = Shadow.Beam()
# beam.load("star_srcalc_000.01")
# x, z, w = beam.getshcol([1,3,23])
# for i in range(5):
# print(x[i], z[i], w[i])
| srio/Orange-XOPPY | orangecontrib/xoppy/widgets/optics/srcalc_idpower.py | Python | bsd-2-clause | 83,132 | [
"CRYSTAL"
] | 1cd0bb00ef705bd5c6ac971ed3daf27c47ba2f76671e2874935d6332b346dd78 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 B. Malengier
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
"""
Display a person's relations to the home person
"""
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.simple import SimpleAccess, SimpleDoc
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.relationship import get_relationship_calculator
# define the formatting string once as a constant. Since this is reused
_FMT = "%-3d %s"
_FMT_VOID = " %s"
_FMT_DET1 = "%-3s %-15s"
_FMT_DET2 = "%-30s %-15s\t%-10s %-2s"
def run(database, document, person):
"""
Create the report class, and produce the quick report
"""
report = AllRelReport(database, document, person)
report.run()
class AllRelReport:
"""
Obtains all relationships, displays the relations, and in details, the
relation path
"""
def __init__(self, database, document, person):
self.database = database
self.person = person
self.sdb = SimpleAccess(database)
self.sdoc = SimpleDoc(document)
self.rel_class = get_relationship_calculator(glocale)
self.msg_list = []
def run(self):
#get home_person
self.home_person = self.database.get_default_person()
if not self.home_person :
self.sdoc.paragraph(_("Home person not set."))
return
self.print_title()
p2 = self.sdb.name(self.home_person)
p1 = self.sdb.name(self.person)
if self.person.handle == self.home_person.handle :
self.sdoc.paragraph(_FMT_VOID % (
_("%(person)s and %(active_person)s are the same person.")) % {
'person' : p1, 'active_person' : p2 })
return
#check if not a family too:
is_spouse = self.rel_class.is_spouse(self.database, self.home_person,
self.person)
if is_spouse:
rel_string = is_spouse
rstr = _("%(person)s is the %(relationship)s of %(active_person)s."
) % {'person' : p1, 'relationship' : rel_string,
'active_person' : p2 }
self.sdoc.paragraph(_FMT_VOID % (rstr))
self.sdoc.paragraph("")
#obtain all relationships, assume home person has largest tree
common, self.msg_list = self.rel_class.get_relationship_distance_new(
self.database, self.person, self.home_person,
all_families=True,
all_dist=True,
only_birth=False)
#all relations
if (not common or common[0][0]== -1 ) and not is_spouse:
rstr = _("%(person)s and %(active_person)s are not "
"directly related.") % {'person' : p2,
'active_person' : p1 }
self.sdoc.paragraph(_FMT_VOID % (rstr))
self.sdoc.paragraph("")
#collapse common so parents of same fam in common are one line
commonnew = self.rel_class.collapse_relations(common)
self.print_details_header(commonnew, self.home_person, self.person,
skip_list_text=None)
self.print_details_path(commonnew, self.home_person, self.person)
self.print_details_path(commonnew, self.home_person, self.person,
first=False)
if not common or common[0][0]== -1 :
self.remarks(self.msg_list)
self.msg_list = []
#check inlaw relation next
else:
#stop
return
#we check the inlaw relationships if not partners.
if is_spouse:
return
handles_done = [(self.person.handle, self.home_person.handle)]
inlaws_pers = [self.person] + self.get_inlaws(self.person)
inlaws_home = [self.home_person] + self.get_inlaws(self.home_person)
#remove overlap:
inlaws_home = [x for x in inlaws_home if x not in inlaws_pers]
inlawwritten = False
skiplist = []
commonnew = []
for inlawpers in inlaws_pers:
for inlawhome in inlaws_home:
if (inlawpers, inlawhome) in handles_done :
continue
else:
handles_done.append((inlawpers, inlawhome))
common, msg = \
self.rel_class.get_relationship_distance_new(
self.database, inlawpers, inlawhome,
all_families=True,
all_dist=True,
only_birth=False)
if msg:
self.msg_list += msg
if common and not common[0][0] == -1:
if not inlawwritten:
rstr = _("%(person)s and %(active_person)s have "
"following in-law relations:"
) % {'person' : p2,
'active_person' : p1 }
self.sdoc.paragraph(_FMT_VOID % (rstr))
self.sdoc.paragraph("")
inlawwritten = True
else:
continue
inlawb = not inlawpers.handle == self.person.handle
inlawa = not inlawhome.handle == self.home_person.handle
commonnew.append((inlawa, inlawb, inlawhome, inlawpers,
self.rel_class.collapse_relations(common)))
skip=[]
skip_text = []
count = 1
for inlawa, inlawb, inlawhome, inlawpers, commonrel in commonnew:
count = self.print_details_header(commonrel,
inlawhome, inlawpers,
inlawa = inlawa, inlawb = inlawb,
count=count,
skip_list=skip, skip_list_text = skip_text)
count = 1
for inlawa, inlawb, inlawhome, inlawpers, commonrel in commonnew:
self.print_details_path(commonrel, inlawhome, inlawpers,
inlawa = inlawa, inlawb = inlawb,
count = count, skip_list = skip)
count = self.print_details_path(commonrel, inlawhome, inlawpers,
inlawa = inlawa, inlawb = inlawb,
count = count, skip_list = skip,
first = False)
self.remarks(self.msg_list, True)
def get_inlaws(self, person):
inlaws = []
family_handles = person.get_family_handle_list()
for handle in family_handles:
fam = self.database.get_family_from_handle(handle)
if fam.father_handle and \
not fam.father_handle == person.handle:
inlaws.append(self.database.get_person_from_handle(
fam.father_handle))
elif fam.mother_handle and \
not fam.mother_handle == person.handle:
inlaws.append(self.database.get_person_from_handle(
fam.mother_handle))
return inlaws
def print_title(self):
""" print the title
"""
p2 = self.sdb.name(self.home_person)
p1 = self.sdb.name(self.person)
self.sdoc.title(_("Relationships of %(person)s to %(active_person)s") % {
'person' : p1 ,'active_person' : p2 })
self.sdoc.paragraph("")
def print_details_header(self, relations, pers1, pers2,
inlawa=False, inlawb=False, count=1,
skip_list=[], skip_list_text = []):
if not relations or relations[0][0] == -1:
return count
sdoc = self.sdoc
rel_class = self.rel_class
for relation in relations:
birth = self.rel_class.only_birth(relation[2])\
and self.rel_class.only_birth(relation[4])
distorig = len(relation[4])
distother = len(relation[2])
if distorig == distother == 1 and not inlawa \
and not inlawb:
rel_str = self.rel_class.get_sibling_relationship_string(
self.rel_class.get_sibling_type(
self.database, pers1, pers2),
self.home_person.get_gender(),
self.person.get_gender())
else:
rel_str = self.rel_class.get_single_relationship_string(
distorig, distother,
self.home_person.get_gender(),
self.person.get_gender(),
relation[4], relation[2],
only_birth = birth,
in_law_a = inlawa, in_law_b = inlawb)
if skip_list_text is not None:
if rel_str in skip_list_text:
skip_list.append(count)
else:
skip_list_text.append(rel_str)
sdoc.paragraph(_FMT % (count-len(skip_list), rel_str))
else:
sdoc.paragraph(_FMT % (count, rel_str))
count += 1
return count
def print_details_path(self, relations, pers1, pers2,
inlawa=False, inlawb=False,
count = 1, skip_list = [], first=True):
if not relations or relations[0][0] == -1:
return count
sdoc = self.sdoc
rel_class = self.rel_class
p2 = self.sdb.name(self.home_person)
p1 = self.sdb.name(self.person)
pers = p2
inlaw = inlawa
if first:
pers = p1
inlaw = inlawb
if count == 1:
sdoc.paragraph("")
sdoc.header1(_("Detailed path from %(person)s to common ancestor"
) % {'person':pers})
sdoc.paragraph("")
sdoc.header2(_FMT_DET1 % (' ', _('Name Common ancestor')))
sdoc.header2(_FMT_DET2 % (' ', _('Parent'), _('Birth'), _('Family')))
sdoc.paragraph("")
for relation in relations:
if count in skip_list:
count += 1
continue
counter = str(count - len([x for x in range(count) if x+1 in skip_list]))
name = _('Unknown')
if relation[1]:
name = self.sdb.name(self.database.get_person_from_handle(
relation[1][0]))
for handle in relation[1][1:]:
name += ' ' + _('and') + ' ' + self.sdb.name(
self.database.get_person_from_handle(handle))
sdoc.paragraph(_FMT_DET1 % (counter, name))
if inlaw:
sdoc.paragraph(_FMT_DET2 % (' ', _('Partner'), ' ', ' '))
if first:
ind1 = 2
ind2 = 3
else:
ind1 = 4
ind2 = 5
for rel,fam in zip(relation[ind1],relation[ind2]):
par_str = _('Unknown') #when sibling, parent is unknown
if rel == rel_class.REL_MOTHER \
or rel == rel_class.REL_MOTHER_NOTBIRTH:
par_str = _('Mother')
if rel == rel_class.REL_FATHER \
or rel == rel_class.REL_FATHER_NOTBIRTH:
par_str = _('Father')
if (rel == rel_class.REL_FAM_BIRTH
or rel == rel_class.REL_FAM_NONBIRTH
or rel == rel_class.REL_FAM_BIRTH_MOTH_ONLY
or rel == rel_class.REL_FAM_BIRTH_FATH_ONLY):
par_str = _('Parents')
birth_str = _('Yes')
if (rel == rel_class.REL_MOTHER_NOTBIRTH
or rel == rel_class.REL_FATHER_NOTBIRTH
or rel == rel_class.REL_FAM_NONBIRTH):
birth_str = _('No')
elif (rel == rel_class.REL_FAM_BIRTH_FATH_ONLY
or rel == rel_class.REL_FAM_BIRTH_MOTH_ONLY):
birth_str = _('Partial')
famstr = ''
if isinstance(fam, list):
famstr = str(fam[0]+1)
for val in fam[1:] :
# TODO for Arabic, should the next comma be translated?
famstr += ', ' + str(val+1)
else:
famstr = str(fam+1)
sdoc.paragraph(_FMT_DET2 % (' ', par_str, birth_str, famstr))
counter=''
name = ''
count += 1
return count
def remarks(self, msg_list, inlaw=False):
if msg_list :
sdoc = self.sdoc
sdoc.paragraph("")
if inlaw:
sdoc.header1(_("Remarks with inlaw family"))
else:
sdoc.header1(_("Remarks"))
sdoc.paragraph("")
sdoc.paragraph(_("The following problems were encountered:"))
list(map(sdoc.paragraph, msg_list))
sdoc.paragraph("")
sdoc.paragraph("")
| SNoiraud/gramps | gramps/plugins/quickview/all_relations.py | Python | gpl-2.0 | 14,420 | [
"Brian"
] | d916c35393a98b8ffd1005b4623d5dbf67b514ffcf8bdb8cdb5e8e085fafcc58 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for misc.GritNode'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import StringIO
from grit import grd_reader
import grit.exception
from grit import util
from grit.format import rc
from grit.node import misc
class GritNodeUnittest(unittest.TestCase):
def testUniqueNameAttribute(self):
try:
restree = grd_reader.Parse(
util.PathFromRoot('grit/testdata/duplicate-name-input.xml'))
self.fail('Expected parsing exception because of duplicate names.')
except grit.exception.Parsing:
pass # Expected case
def testReadFirstIdsFromFile(self):
test_resource_ids = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'resource_ids')
base_dir = os.path.dirname(test_resource_ids)
src_dir, id_dict = misc._ReadFirstIdsFromFile(
test_resource_ids,
{
'FOO': os.path.join(base_dir, 'bar'),
'SHARED_INTERMEDIATE_DIR': os.path.join(base_dir,
'out/Release/obj/gen'),
})
self.assertEqual({}, id_dict.get('bar/file.grd', None))
self.assertEqual({},
id_dict.get('out/Release/obj/gen/devtools/devtools.grd', None))
src_dir, id_dict = misc._ReadFirstIdsFromFile(
test_resource_ids,
{
'SHARED_INTERMEDIATE_DIR': '/outside/src_dir',
})
self.assertEqual({}, id_dict.get('devtools.grd', None))
# Verifies that GetInputFiles() returns the correct list of files
# corresponding to ChromeScaledImage nodes when assets are missing.
def testGetInputFilesChromeScaledImage(self):
chrome_html_path = util.PathFromRoot('grit/testdata/chrome_html.html')
xml = '''<?xml version="1.0" encoding="utf-8"?>
<grit latest_public_release="0" current_release="1">
<outputs>
<output filename="default.pak" type="data_package" context="default_100_percent" />
<output filename="special.pak" type="data_package" context="special_100_percent" fallback_to_default_layout="false" />
</outputs>
<release seq="1">
<structures fallback_to_low_resolution="true">
<structure type="chrome_scaled_image" name="IDR_A" file="a.png" />
<structure type="chrome_scaled_image" name="IDR_B" file="b.png" />
<structure type="chrome_html" name="HTML_FILE1" file="%s" flattenhtml="true" />
</structures>
</release>
</grit>''' % chrome_html_path
grd = grd_reader.Parse(StringIO.StringIO(xml), util.PathFromRoot('grit/testdata'))
expected = ['chrome_html.html', 'default_100_percent/a.png',
'default_100_percent/b.png', 'included_sample.html',
'special_100_percent/a.png']
actual = [os.path.relpath(path, util.PathFromRoot('grit/testdata')) for path in grd.GetInputFiles()]
# Convert path separator for Windows paths.
actual = [path.replace('\\', '/') for path in actual]
self.assertEquals(expected, actual)
# Verifies that GetInputFiles() returns the correct list of files
# when files include other files.
def testGetInputFilesFromIncludes(self):
chrome_html_path = util.PathFromRoot('grit/testdata/chrome_html.html')
xml = '''<?xml version="1.0" encoding="utf-8"?>
<grit latest_public_release="0" current_release="1">
<outputs>
<output filename="default.pak" type="data_package" context="default_100_percent" />
<output filename="special.pak" type="data_package" context="special_100_percent" fallback_to_default_layout="false" />
</outputs>
<release seq="1">
<includes>
<include name="IDR_TESTDATA_CHROME_HTML" file="%s" flattenhtml="true"
allowexternalscript="true" type="BINDATA" />
</includes>
</release>
</grit>''' % chrome_html_path
grd = grd_reader.Parse(StringIO.StringIO(xml), util.PathFromRoot('grit/testdata'))
expected = ['chrome_html.html', 'included_sample.html']
actual = [os.path.relpath(path, util.PathFromRoot('grit/testdata')) for path in grd.GetInputFiles()]
# Convert path separator for Windows paths.
actual = [path.replace('\\', '/') for path in actual]
self.assertEquals(expected, actual)
class IfNodeUnittest(unittest.TestCase):
def testIffyness(self):
grd = grd_reader.Parse(StringIO.StringIO('''
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="'bingo' in defs">
<message name="IDS_BINGO">
Bingo!
</message>
</if>
<if expr="'hello' in defs">
<message name="IDS_HELLO">
Hello!
</message>
</if>
<if expr="lang == 'fr' or 'FORCE_FRENCH' in defs">
<message name="IDS_HELLO" internal_comment="French version">
Good morning
</message>
</if>
<if expr="is_win">
<message name="IDS_ISWIN">is_win</message>
</if>
</messages>
</release>
</grit>'''), dir='.')
messages_node = grd.children[0].children[0]
bingo_message = messages_node.children[0].children[0]
hello_message = messages_node.children[1].children[0]
french_message = messages_node.children[2].children[0]
is_win_message = messages_node.children[3].children[0]
self.assertTrue(bingo_message.name == 'message')
self.assertTrue(hello_message.name == 'message')
self.assertTrue(french_message.name == 'message')
grd.SetOutputLanguage('fr')
grd.SetDefines({'hello': '1'})
active = set(grd.ActiveDescendants())
self.failUnless(bingo_message not in active)
self.failUnless(hello_message in active)
self.failUnless(french_message in active)
grd.SetOutputLanguage('en')
grd.SetDefines({'bingo': 1})
active = set(grd.ActiveDescendants())
self.failUnless(bingo_message in active)
self.failUnless(hello_message not in active)
self.failUnless(french_message not in active)
grd.SetOutputLanguage('en')
grd.SetDefines({'FORCE_FRENCH': '1', 'bingo': '1'})
active = set(grd.ActiveDescendants())
self.failUnless(bingo_message in active)
self.failUnless(hello_message not in active)
self.failUnless(french_message in active)
grd.SetOutputLanguage('en')
grd.SetDefines({})
self.failUnless(grd.target_platform == sys.platform)
grd.SetTargetPlatform('darwin')
active = set(grd.ActiveDescendants())
self.failUnless(is_win_message not in active)
grd.SetTargetPlatform('win32')
active = set(grd.ActiveDescendants())
self.failUnless(is_win_message in active)
def testElsiness(self):
grd = util.ParseGrdForUnittest('''
<messages>
<if expr="True">
<then> <message name="IDS_YES1"></message> </then>
<else> <message name="IDS_NO1"></message> </else>
</if>
<if expr="True">
<then> <message name="IDS_YES2"></message> </then>
<else> </else>
</if>
<if expr="True">
<then> </then>
<else> <message name="IDS_NO2"></message> </else>
</if>
<if expr="True">
<then> </then>
<else> </else>
</if>
<if expr="False">
<then> <message name="IDS_NO3"></message> </then>
<else> <message name="IDS_YES3"></message> </else>
</if>
<if expr="False">
<then> <message name="IDS_NO4"></message> </then>
<else> </else>
</if>
<if expr="False">
<then> </then>
<else> <message name="IDS_YES4"></message> </else>
</if>
<if expr="False">
<then> </then>
<else> </else>
</if>
</messages>''')
included = [msg.attrs['name'] for msg in grd.ActiveDescendants()
if msg.name == 'message']
self.assertEqual(['IDS_YES1', 'IDS_YES2', 'IDS_YES3', 'IDS_YES4'], included)
def testIffynessWithOutputNodes(self):
grd = grd_reader.Parse(StringIO.StringIO('''
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<outputs>
<output filename="uncond1.rc" type="rc_data" />
<if expr="lang == 'fr' or 'hello' in defs">
<output filename="only_fr.adm" type="adm" />
<output filename="only_fr.plist" type="plist" />
</if>
<if expr="lang == 'ru'">
<output filename="doc.html" type="document" />
</if>
<output filename="uncond2.adm" type="adm" />
<output filename="iftest.h" type="rc_header">
<emit emit_type='prepend'></emit>
</output>
</outputs>
</grit>'''), dir='.')
outputs_node = grd.children[0]
uncond1_output = outputs_node.children[0]
only_fr_adm_output = outputs_node.children[1].children[0]
only_fr_plist_output = outputs_node.children[1].children[1]
doc_output = outputs_node.children[2].children[0]
uncond2_output = outputs_node.children[0]
self.assertTrue(uncond1_output.name == 'output')
self.assertTrue(only_fr_adm_output.name == 'output')
self.assertTrue(only_fr_plist_output.name == 'output')
self.assertTrue(doc_output.name == 'output')
self.assertTrue(uncond2_output.name == 'output')
grd.SetOutputLanguage('ru')
grd.SetDefines({'hello': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(
outputs,
['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'doc.html',
'uncond2.adm', 'iftest.h'])
grd.SetOutputLanguage('ru')
grd.SetDefines({'bingo': '2'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(
outputs,
['uncond1.rc', 'doc.html', 'uncond2.adm', 'iftest.h'])
grd.SetOutputLanguage('fr')
grd.SetDefines({'hello': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(
outputs,
['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'uncond2.adm',
'iftest.h'])
grd.SetOutputLanguage('en')
grd.SetDefines({'bingo': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h'])
grd.SetOutputLanguage('fr')
grd.SetDefines({'bingo': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertNotEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h'])
def testChildrenAccepted(self):
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<includes>
<if expr="'bingo' in defs">
<include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
</if>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
</if>
</if>
</includes>
<structures>
<if expr="'bingo' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
</if>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
</if>
</if>
</structures>
<messages>
<if expr="'bingo' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</if>
</messages>
</release>
<translations>
<if expr="'bingo' in defs">
<file lang="nl" path="nl_translations.xtb" />
</if>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<file lang="nl" path="nl_translations.xtb" />
</if>
</if>
</translations>
</grit>'''), dir='.')
def testIfBadChildrenNesting(self):
# includes
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<includes>
<if expr="'bingo' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
</if>
</includes>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
# messages
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="'bingo' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
</if>
</messages>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
# structures
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<structures>
<if expr="'bingo' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</structures>
</release>
</grit>''')
# translations
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<translations>
<if expr="'bingo' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</translations>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
# same with nesting
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<includes>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
</if>
</if>
</includes>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
</if>
</if>
</messages>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<structures>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</if>
</structures>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
xml = StringIO.StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<translations>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</if>
</translations>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
class ReleaseNodeUnittest(unittest.TestCase):
def testPseudoControl(self):
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="1" source_lang_id="en-US" current_release="2" base_dir=".">
<release seq="1" allow_pseudo="false">
<messages>
<message name="IDS_HELLO">
Hello
</message>
</messages>
<structures>
<structure type="dialog" name="IDD_ABOUTBOX" encoding="utf-16" file="klonk.rc" />
</structures>
</release>
<release seq="2">
<messages>
<message name="IDS_BINGO">
Bingo
</message>
</messages>
<structures>
<structure type="menu" name="IDC_KLONKMENU" encoding="utf-16" file="klonk.rc" />
</structures>
</release>
</grit>'''), util.PathFromRoot('grit/testdata'))
grd.SetOutputLanguage('en')
grd.RunGatherers()
hello = grd.GetNodeById('IDS_HELLO')
aboutbox = grd.GetNodeById('IDD_ABOUTBOX')
bingo = grd.GetNodeById('IDS_BINGO')
menu = grd.GetNodeById('IDC_KLONKMENU')
for node in [hello, aboutbox]:
self.failUnless(not node.PseudoIsAllowed())
for node in [bingo, menu]:
self.failUnless(node.PseudoIsAllowed())
# TODO(benrg): There was a test here that formatting hello and aboutbox with
# a pseudo language should fail, but they do not fail and the test was
# broken and failed to catch it. Fix this.
# Should not raise an exception since pseudo is allowed
rc.FormatMessage(bingo, 'xyz-pseudo')
rc.FormatStructure(menu, 'xyz-pseudo', '.')
if __name__ == '__main__':
unittest.main()
| hujiajie/chromium-crosswalk | tools/grit/grit/node/misc_unittest.py | Python | bsd-3-clause | 18,622 | [
"xTB"
] | 0b4f2785c3b8a9fff43ec05f780d7a72c1e39d9accb3b7f15099e93079976113 |
# coding=utf-8
# Copyright 2020 The Google Research Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
class BertConfig(object):
  """Configuration for `BertModel`.

  Holds all model hyperparameters and supports round-tripping through
  Python dicts and JSON (`from_dict`/`to_dict`, `from_json_file`/
  `to_json_string`).
  """

  def __init__(self,
               vocab_size,
               hidden_size=768,
               num_hidden_layers=12,
               num_attention_heads=12,
               intermediate_size=3072,
               hidden_act="gelu",
               hidden_dropout_prob=0.1,
               attention_probs_dropout_prob=0.1,
               max_position_embeddings=512,
               type_vocab_size=16,
               initializer_range=0.02):
    """Constructs BertConfig.

    Args:
      vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
      hidden_size: Size of the encoder layers and the pooler layer.
      num_hidden_layers: Number of hidden layers in the Transformer encoder.
      num_attention_heads: Number of attention heads for each attention layer in
        the Transformer encoder.
      intermediate_size: The size of the "intermediate" (i.e., feed-forward)
        layer in the Transformer encoder.
      hidden_act: The non-linear activation function (function or string) in the
        encoder and pooler.
      hidden_dropout_prob: The dropout probability for all fully connected
        layers in the embeddings, encoder, and pooler.
      attention_probs_dropout_prob: The dropout ratio for the attention
        probabilities.
      max_position_embeddings: The maximum sequence length that this model might
        ever be used with. Typically set this to something large just in case
        (e.g., 512 or 1024 or 2048).
      type_vocab_size: The vocabulary size of the `token_type_ids` passed into
        `BertModel`.
      initializer_range: The stdev of the truncated_normal_initializer for
        initializing all weight matrices.
    """
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.hidden_act = hidden_act
    self.intermediate_size = intermediate_size
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.initializer_range = initializer_range

  @classmethod
  def from_dict(cls, json_object):
    """Constructs a `BertConfig` from a Python dictionary of parameters."""
    # Use `cls` rather than a hard-coded class name so that subclasses of
    # BertConfig round-trip through from_dict/from_json_file as instances of
    # the subclass (from_json_file below already relies on `cls`).
    config = cls(vocab_size=None)
    # Plain `.items()` works identically on Python 2 and 3; `six.iteritems`
    # is unnecessary here.
    for key, value in json_object.items():
      config.__dict__[key] = value
    return config

  @classmethod
  def from_json_file(cls, json_file):
    """Constructs a `BertConfig` from a json file of parameters."""
    with tf.gfile.Open(json_file, "r") as reader:
      text = reader.read()
    return cls.from_dict(json.loads(text))

  def to_dict(self):
    """Serializes this instance to a Python dictionary."""
    output = copy.deepcopy(self.__dict__)
    return output

  def to_json_string(self):
    """Serializes this instance to a JSON string."""
    return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class BertModel(object):
  """BERT model ("Bidirectional Encoder Representations from Transformers").

  Example usage:

  ```python
  # Already been converted into WordPiece token ids
  input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
  input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
  token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])

  config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
    num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)

  model = modeling.BertModel(config=config, is_training=True,
    input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)

  label_embeddings = tf.get_variable(...)
  pooled_output = model.get_pooled_output()
  logits = tf.matmul(pooled_output, label_embeddings)
  ...
  ```
  """

  def __init__(self,
               config,
               is_training,
               input_ids,
               input_mask=None,
               token_type_ids=None,
               use_one_hot_embeddings=False,
               scope=None):
    """Constructor for BertModel.

    Args:
      config: `BertConfig` instance.
      is_training: bool. true for training model, false for eval model. Controls
        whether dropout will be applied.
      input_ids: int32 Tensor of shape [batch_size, seq_length].
      input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
      token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
      use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
        embeddings or tf.embedding_lookup() for the word embeddings.
      scope: (optional) variable scope. Defaults to "bert".

    Raises:
      ValueError: The config is invalid or one of the input tensor shapes
        is invalid.
    """
    # Copy so that disabling dropout below does not mutate the caller's config.
    config = copy.deepcopy(config)
    if not is_training:
      # Dropout is a no-op at eval/inference time.
      config.hidden_dropout_prob = 0.0
      config.attention_probs_dropout_prob = 0.0

    input_shape = get_shape_list(input_ids, expected_rank=2)
    batch_size = input_shape[0]
    seq_length = input_shape[1]

    # Default mask: attend to every position; default segment ids: all zero.
    if input_mask is None:
      input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)

    if token_type_ids is None:
      token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)

    with tf.variable_scope(scope, default_name="bert"):
      with tf.variable_scope("embeddings"):
        # Perform embedding lookup on the word ids.
        (self.word_embedding_output, self.embedding_table) = embedding_lookup(
            input_ids=input_ids,
            vocab_size=config.vocab_size,
            embedding_size=config.hidden_size,
            initializer_range=config.initializer_range,
            word_embedding_name="word_embeddings",
            use_one_hot_embeddings=use_one_hot_embeddings)

        # Add positional embeddings and token type embeddings, then layer
        # normalize and perform dropout.
        self.embedding_output = embedding_postprocessor(
            input_tensor=self.word_embedding_output,
            use_token_type=True,
            token_type_ids=token_type_ids,
            token_type_vocab_size=config.type_vocab_size,
            token_type_embedding_name="token_type_embeddings",
            use_position_embeddings=True,
            position_embedding_name="position_embeddings",
            initializer_range=config.initializer_range,
            max_position_embeddings=config.max_position_embeddings,
            dropout_prob=config.hidden_dropout_prob)

      with tf.variable_scope("encoder"):
        # This converts a 2D mask of shape [batch_size, seq_length] to a 3D
        # mask of shape [batch_size, seq_length, seq_length] which is used
        # for the attention scores.
        attention_mask = create_attention_mask_from_input_mask(
            input_ids, input_mask)

        # Run the stacked transformer.
        # `sequence_output` shape = [batch_size, seq_length, hidden_size].
        self.all_encoder_layers = transformer_model(
            input_tensor=self.embedding_output,
            attention_mask=attention_mask,
            hidden_size=config.hidden_size,
            num_hidden_layers=config.num_hidden_layers,
            num_attention_heads=config.num_attention_heads,
            intermediate_size=config.intermediate_size,
            intermediate_act_fn=get_activation(config.hidden_act),
            hidden_dropout_prob=config.hidden_dropout_prob,
            attention_probs_dropout_prob=config.attention_probs_dropout_prob,
            initializer_range=config.initializer_range,
            do_return_all_layers=True)

      self.sequence_output = self.all_encoder_layers[-1]
      # The "pooler" converts the encoded sequence tensor of shape
      # [batch_size, seq_length, hidden_size] to a tensor of shape
      # [batch_size, hidden_size]. This is necessary for segment-level
      # (or segment-pair-level) classification tasks where we need a fixed
      # dimensional representation of the segment.
      with tf.variable_scope("pooler"):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token. We assume that this has been pre-trained
        first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
        self.pooled_output = tf.layers.dense(
            first_token_tensor,
            config.hidden_size,
            activation=tf.tanh,
            kernel_initializer=create_initializer(config.initializer_range))

  def get_pooled_output(self):
    """Gets the [batch_size, hidden_size] pooled (first-token) representation."""
    return self.pooled_output

  def get_sequence_output(self):
    """Gets final hidden layer of encoder.

    Returns:
      float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
      to the final hidden of the transformer encoder.
    """
    return self.sequence_output

  def get_all_encoder_layers(self):
    """Gets the list of per-layer encoder outputs (one Tensor per layer)."""
    return self.all_encoder_layers

  def get_word_embedding_output(self):
    """Get output of the word(piece) embedding lookup.

    This is BEFORE positional embeddings and token type embeddings have been
    added.

    Returns:
      float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
      to the output of the word(piece) embedding layer.
    """
    return self.word_embedding_output

  def get_embedding_output(self):
    """Gets output of the embedding lookup (i.e., input to the transformer).

    Returns:
      float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
      to the output of the embedding layer, after summing the word
      embeddings with the positional embeddings and the token type embeddings,
      then performing layer normalization. This is the input to the transformer.
    """
    return self.embedding_output

  def get_embedding_table(self):
    """Gets the [vocab_size, hidden_size] word embedding table variable."""
    return self.embedding_table
def gelu(x):
  """Gaussian Error Linear Unit.

  This is a smoother version of the RELU.
  Original paper: https://arxiv.org/abs/1606.08415

  Args:
    x: float Tensor to perform activation.

  Returns:
    `x` with the GELU activation applied.
  """
  # tanh-based approximation of the Gaussian CDF, applied elementwise.
  tanh_arg = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
  cdf = 0.5 * (1.0 + tf.tanh(tanh_arg))
  return x * cdf
def get_activation(activation_string):
  """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.

  Args:
    activation_string: String name of the activation function.

  Returns:
    A Python function corresponding to the activation function. If
    `activation_string` is None, empty, or "linear", this will return None.
    If `activation_string` is not a string, it will return `activation_string`.

  Raises:
    ValueError: The `activation_string` does not correspond to a known
      activation.
  """
  # Anything that's not a string is assumed to already be an activation
  # callable and is passed through unchanged.
  if not isinstance(activation_string, six.string_types):
    return activation_string

  # None and the empty string both mean "no activation".
  if not activation_string:
    return None

  act = activation_string.lower()
  if act == "linear":
    return None

  known_activations = {
      "relu": tf.nn.relu,
      "gelu": gelu,
      "tanh": tf.tanh,
  }
  if act in known_activations:
    return known_activations[act]
  raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
  """Compute the union of the current variables and checkpoint variables.

  Args:
    tvars: list of tf.Variable from the current graph.
    init_checkpoint: string path to a TensorFlow checkpoint.

  Returns:
    A tuple (assignment_map, initialized_variable_names):
      assignment_map: OrderedDict mapping each checkpoint variable name that
        also exists in the graph to itself (identity mapping, suitable for
        `tf.train.init_from_checkpoint`).
      initialized_variable_names: dict whose keys are the names (both with and
        without the ":0" suffix) of graph variables that will be initialized
        from the checkpoint.
  """
  # Index graph variables by name, stripping the ":<output_index>" suffix
  # TensorFlow appends (e.g. "bert/foo:0" -> "bert/foo").
  name_to_variable = collections.OrderedDict()
  for var in tvars:
    name = var.name
    m = re.match("^(.*):\\d+$", name)
    if m is not None:
      name = m.group(1)
    name_to_variable[name] = var

  initialized_variable_names = {}
  assignment_map = collections.OrderedDict()
  # tf.train.list_variables yields (name, shape) pairs; only the name is
  # needed here.
  for name, _ in tf.train.list_variables(init_checkpoint):
    if name not in name_to_variable:
      continue
    assignment_map[name] = name
    initialized_variable_names[name] = 1
    initialized_variable_names[name + ":0"] = 1

  return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
  """Perform dropout.

  Args:
    input_tensor: float Tensor.
    dropout_prob: Python float. The probability of dropping out a value (NOT of
      *keeping* a dimension as in `tf.nn.dropout`).

  Returns:
    A version of `input_tensor` with dropout applied.
  """
  # A missing or zero probability means dropout is disabled entirely.
  is_disabled = dropout_prob is None or dropout_prob == 0.0
  if is_disabled:
    return input_tensor
  return tf.nn.dropout(input_tensor, rate=dropout_prob)
def layer_norm(input_tensor, name=None):
  """Run layer normalization on the last dimension of the tensor."""
  # begin_norm_axis / begin_params_axis of -1 restrict both the normalization
  # and the learned scale/bias parameters to the final (feature) dimension.
  return contrib_layers.layer_norm(
      inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
  """Runs layer normalization followed by dropout."""
  normalized = layer_norm(input_tensor, name)
  return dropout(normalized, dropout_prob)
def create_initializer(initializer_range=0.02):
  """Creates a `truncated_normal_initializer` with the given range.

  Args:
    initializer_range: float. Standard deviation of the truncated normal.

  Returns:
    A TensorFlow initializer object.
  """
  return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
                     vocab_size,
                     embedding_size=128,
                     initializer_range=0.02,
                     word_embedding_name="word_embeddings",
                     use_one_hot_embeddings=False):
  """Looks up words embeddings for id tensor.

  Args:
    input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
      ids.
    vocab_size: int. Size of the embedding vocabulary.
    embedding_size: int. Width of the word embeddings.
    initializer_range: float. Embedding initialization range.
    word_embedding_name: string. Name of the embedding table.
    use_one_hot_embeddings: bool. If True, use one-hot method for word
      embeddings. If False, use `tf.nn.embedding_lookup()`.

  Returns:
    A tuple (output, embedding_table): `output` is a float Tensor of shape
    [batch_size, seq_length, embedding_size]; `embedding_table` is the
    [vocab_size, embedding_size] embedding variable.
  """
  # This function assumes that the input is of shape [batch_size, seq_length,
  # num_inputs].
  #
  # If the input is a 2D tensor of shape [batch_size, seq_length], we
  # reshape to [batch_size, seq_length, 1].
  if input_ids.shape.ndims == 2:
    input_ids = tf.expand_dims(input_ids, axis=[-1])

  embedding_table = tf.get_variable(
      name=word_embedding_name,
      shape=[vocab_size, embedding_size],
      initializer=create_initializer(initializer_range))

  if use_one_hot_embeddings:
    # Same lookup expressed as one-hot x table matmul instead of a gather.
    flat_input_ids = tf.reshape(input_ids, [-1])
    one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
    output = tf.matmul(one_hot_input_ids, embedding_table)
  else:
    output = tf.nn.embedding_lookup(embedding_table, input_ids)

  input_shape = get_shape_list(input_ids)

  # Collapse the trailing num_inputs dimension into the embedding dimension:
  # [batch, seq, num_inputs, emb] -> [batch, seq, num_inputs * emb].
  output = tf.reshape(output,
                      input_shape[0:-1] + [input_shape[-1] * embedding_size])
  return (output, embedding_table)
def embedding_postprocessor(input_tensor,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            use_position_embeddings=True,
                            position_embedding_name="position_embeddings",
                            initializer_range=0.02,
                            max_position_embeddings=512,
                            dropout_prob=0.1):
  """Performs various post-processing on a word embedding tensor.

  Adds (optionally) token-type and position embeddings to `input_tensor`,
  then layer-normalizes and applies dropout.

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length,
      embedding_size].
    use_token_type: bool. Whether to add embeddings for `token_type_ids`.
    token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
      Must be specified if `use_token_type` is True.
    token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
    token_type_embedding_name: string. The name of the embedding table variable
      for token type ids.
    use_position_embeddings: bool. Whether to add position embeddings for the
      position of each token in the sequence.
    position_embedding_name: string. The name of the embedding table variable
      for positional embeddings.
    initializer_range: float. Range of the weight initialization.
    max_position_embeddings: int. Maximum sequence length that might ever be
      used with this model. This can be longer than the sequence length of
      input_tensor, but cannot be shorter.
    dropout_prob: float. Dropout probability applied to the final output tensor.

  Returns:
    float tensor with same shape as `input_tensor`.

  Raises:
    ValueError: One of the tensor shapes or input values is invalid.
  """
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  width = input_shape[2]

  output = input_tensor

  if use_token_type:
    if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if"
                       "`use_token_type` is True.")
    token_type_table = tf.get_variable(
        name=token_type_embedding_name,
        shape=[token_type_vocab_size, width],
        initializer=create_initializer(initializer_range))
    # This vocab will be small so we always do one-hot here, since it is always
    # faster for a small vocabulary.
    flat_token_type_ids = tf.reshape(token_type_ids, [-1])
    one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
    token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
    token_type_embeddings = tf.reshape(token_type_embeddings,
                                       [batch_size, seq_length, width])
    output += token_type_embeddings

  if use_position_embeddings:
    # Graph-level guard: fail at run time if the sequence is longer than the
    # position embedding table.
    assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
    with tf.control_dependencies([assert_op]):
      full_position_embeddings = tf.get_variable(
          name=position_embedding_name,
          shape=[max_position_embeddings, width],
          initializer=create_initializer(initializer_range))
      # Since the position embedding table is a learned variable, we create it
      # using a (long) sequence length `max_position_embeddings`. The actual
      # sequence length might be shorter than this, for faster training of
      # tasks that do not have long sequences.
      #
      # So `full_position_embeddings` is effectively an embedding table
      # for position [0, 1, 2, ..., max_position_embeddings-1], and the current
      # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
      # perform a slice.
      position_embeddings = tf.slice(full_position_embeddings, [0, 0],
                                     [seq_length, -1])
      num_dims = len(output.shape.as_list())

      # Only the last two dimensions are relevant (`seq_length` and `width`), so
      # we broadcast among the first dimensions, which is typically just
      # the batch size.
      position_broadcast_shape = []
      for _ in range(num_dims - 2):
        position_broadcast_shape.append(1)
      position_broadcast_shape.extend([seq_length, width])
      position_embeddings = tf.reshape(position_embeddings,
                                       position_broadcast_shape)
      output += position_embeddings

  output = layer_norm_and_dropout(output, dropout_prob)
  return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
  """Create 3D attention mask from a 2D tensor mask.

  Args:
    from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
    to_mask: int32 Tensor of shape [batch_size, to_seq_length].

  Returns:
    float Tensor of shape [batch_size, from_seq_length, to_seq_length].
  """
  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  batch_size = from_shape[0]
  from_seq_length = from_shape[1]

  to_shape = get_shape_list(to_mask, expected_rank=2)
  to_seq_length = to_shape[1]

  # `to_mask` = [batch_size, 1, to_seq_length], cast to float for broadcasting.
  to_mask = tf.cast(
      tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)

  # We don't assume that `from_tensor` is a mask (although it could be). We
  # don't actually care if we attend *from* padding tokens (only *to* padding)
  # tokens so we create a tensor of all ones.
  #
  # `broadcast_ones` = [batch_size, from_seq_length, 1]
  broadcast_ones = tf.ones(
      shape=[batch_size, from_seq_length, 1], dtype=tf.float32)

  # Here we broadcast along two dimensions to create the mask.
  mask = broadcast_ones * to_mask

  return mask
def dense_layer_3d(input_tensor,
                   num_attention_heads,
                   size_per_head,
                   initializer,
                   activation,
                   name=None):
  """A dense layer with 3D kernel.

  Projects [batch, seq_length, hidden_size] into per-head outputs of shape
  [batch, seq_length, num_attention_heads, size_per_head].

  Args:
    input_tensor: float Tensor of shape [batch, seq_length, hidden_size].
    num_attention_heads: Number of attention heads.
    size_per_head: The size per attention head.
    initializer: Kernel initializer.
    activation: Activation function.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """

  last_dim = get_shape_list(input_tensor)[-1]

  with tf.variable_scope(name):
    # Kernel is created 2D ([last_dim, N*H]) for checkpoint compatibility,
    # then reshaped to 3D for the einsum.
    w = tf.get_variable(
        name="kernel",
        shape=[last_dim, num_attention_heads * size_per_head],
        initializer=initializer)
    w = tf.reshape(w, [last_dim, num_attention_heads, size_per_head])
    b = tf.get_variable(
        name="bias",
        shape=[num_attention_heads * size_per_head],
        initializer=tf.zeros_initializer)
    b = tf.reshape(b, [num_attention_heads, size_per_head])
    # [batch, seq, last_dim] x [last_dim, heads, head_size]
    # -> [batch, seq, heads, head_size]
    ret = tf.einsum("abc,cde->abde", input_tensor, w)
    ret += b
    if activation is not None:
      return activation(ret)
    else:
      return ret
def dense_layer_3d_proj(input_tensor,
                        hidden_size,
                        num_attention_heads,
                        head_size,
                        initializer,
                        activation,
                        name=None):
  """A dense layer with 3D kernel for projection.

  Projects per-head attention output [batch, from_seq_length,
  num_attention_heads, size_per_head] back to [batch, from_seq_length,
  hidden_size].

  Args:
    input_tensor: float Tensor of shape [batch,from_seq_length,
      num_attention_heads, size_per_head].
    hidden_size: The size of hidden layer.
    num_attention_heads: The size of output dimension.
    head_size: The size of head. NOTE(review): this argument is ignored — it
      is immediately overwritten below with hidden_size // num_attention_heads;
      callers in this file pass a consistent value, so behavior is unaffected.
    initializer: Kernel initializer.
    activation: Activation function.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """
  head_size = hidden_size // num_attention_heads
  with tf.variable_scope(name):
    # Kernel is stored 2D ([hidden, hidden]) for checkpoint compatibility and
    # reshaped to [heads, head_size, hidden] for the einsum.
    w = tf.get_variable(
        name="kernel",
        shape=[hidden_size, hidden_size],
        initializer=initializer)
    w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])
    b = tf.get_variable(
        name="bias", shape=[hidden_size], initializer=tf.zeros_initializer)
    # B=batch, F=from_seq, N=heads, H=head_size, D=hidden.
    ret = tf.einsum("BFNH,NHD->BFD", input_tensor, w)
    ret += b
  if activation is not None:
    return activation(ret)
  else:
    return ret
def dense_layer_2d(input_tensor,
                   output_size,
                   initializer,
                   activation,
                   name=None):
  """A dense layer with 2D kernel.

  Args:
    input_tensor: Float tensor with rank 3.
    output_size: The size of output dimension.
    initializer: Kernel initializer.
    activation: Activation function.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """
  last_dim = get_shape_list(input_tensor)[-1]
  with tf.variable_scope(name):
    w = tf.get_variable(
        name="kernel", shape=[last_dim, output_size], initializer=initializer)
    b = tf.get_variable(
        name="bias", shape=[output_size], initializer=tf.zeros_initializer)
    # [batch, seq, last_dim] x [last_dim, output_size] -> [batch, seq, output].
    ret = tf.einsum("abc,cd->abd", input_tensor, w)
    ret += b
  if activation is not None:
    return activation(ret)
  else:
    return ret
def attention_layer(from_tensor,
                    to_tensor,
                    attention_mask=None,
                    num_attention_heads=1,
                    size_per_head=512,
                    query_act=None,
                    key_act=None,
                    value_act=None,
                    attention_probs_dropout_prob=0.0,
                    initializer_range=0.02,
                    batch_size=None,
                    from_seq_length=None,
                    to_seq_length=None):
  """Performs multi-headed attention from `from_tensor` to `to_tensor`.

  This is an implementation of multi-headed attention based on "Attention
  is all you Need". If `from_tensor` and `to_tensor` are the same, then
  this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.

  This function first projects `from_tensor` into a "query" tensor and
  `to_tensor` into "key" and "value" tensors. These are (effectively) a list
  of tensors of length `num_attention_heads`, where each tensor is of shape
  [batch_size, seq_length, size_per_head].

  Then, the query and key tensors are dot-producted and scaled. These are
  softmaxed to obtain attention probabilities. The value tensors are then
  interpolated by these probabilities, then concatenated back to a single
  tensor and returned.

  In practice, the multi-headed attention are done with tf.einsum as follows:
    Input_tensor: [BFD]
    Wq, Wk, Wv: [DNH]
    Q:[BFNH] = einsum('BFD,DNH->BFNH', Input_tensor, Wq)
    K:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wk)
    V:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wv)
    attention_scores:[BNFT] = einsum('BFNH,BTNH->BNFT', Q, K) / sqrt(H)
    attention_probs:[BNFT] = softmax(attention_scores)
    context_layer:[BFNH] = einsum('BNFT,BTNH->BFNH', attention_probs, V)
    Wout:[DNH]
    Output:[BFD] = einsum('BFNH,DNH->BFD', context_layer, Wout)

  Args:
    from_tensor: float Tensor of shape [batch_size, from_seq_length,
      from_width].
    to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
    attention_mask: (optional) int32 Tensor of shape [batch_size,
      from_seq_length, to_seq_length]. The values should be 1 or 0. The
      attention scores will effectively be set to -infinity for any positions in
      the mask that are 0, and will be unchanged for positions that are 1.
    num_attention_heads: int. Number of attention heads.
    size_per_head: int. Size of each attention head.
    query_act: (optional) Activation function for the query transform.
    key_act: (optional) Activation function for the key transform.
    value_act: (optional) Activation function for the value transform.
    attention_probs_dropout_prob: (optional) float. Dropout probability of the
      attention probabilities.
    initializer_range: float. Range of the weight initializer.
    batch_size: (Optional) int. If the input is 2D, this might be the batch size
      of the 3D version of the `from_tensor` and `to_tensor`.
    from_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `from_tensor`.
    to_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `to_tensor`.

  Returns:
    float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
      size_per_head].

  Raises:
    ValueError: Any of the arguments or tensor shapes are invalid.
  """
  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])

  if len(from_shape) != len(to_shape):
    raise ValueError(
        "The rank of `from_tensor` must match the rank of `to_tensor`.")

  if len(from_shape) == 3:
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]
    to_seq_length = to_shape[1]
  elif len(from_shape) == 2:
    if (batch_size is None or from_seq_length is None or to_seq_length is None):
      raise ValueError(
          "When passing in rank 2 tensors to attention_layer, the values "
          "for `batch_size`, `from_seq_length`, and `to_seq_length` "
          "must all be specified.")

  # Scalar dimensions referenced here:
  #   B = batch size (number of sequences)
  #   F = `from_tensor` sequence length
  #   T = `to_tensor` sequence length
  #   N = `num_attention_heads`
  #   H = `size_per_head`

  # `query_layer` = [B, F, N, H]
  query_layer = dense_layer_3d(from_tensor, num_attention_heads, size_per_head,
                               create_initializer(initializer_range), query_act,
                               "query")

  # `key_layer` = [B, T, N, H]
  key_layer = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
                             create_initializer(initializer_range), key_act,
                             "key")

  # `value_layer` = [B, T, N, H]
  value_layer = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
                               create_initializer(initializer_range), value_act,
                               "value")

  # Take the dot product between "query" and "key" to get the raw
  # attention scores, scaled by 1/sqrt(head size).
  attention_scores = tf.einsum("BTNH,BFNH->BNFT", key_layer, query_layer)
  attention_scores = tf.multiply(attention_scores,
                                 1.0 / math.sqrt(float(size_per_head)))

  if attention_mask is not None:
    # `attention_mask` = [B, 1, F, T]
    attention_mask = tf.expand_dims(attention_mask, axis=[1])

    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
    # masked positions, this operation will create a tensor which is 0.0 for
    # positions we want to attend and -10000.0 for masked positions.
    adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0

    # Since we are adding it to the raw scores before the softmax, this is
    # effectively the same as removing these entirely.
    attention_scores += adder

  # Normalize the attention scores to probabilities.
  # `attention_probs` = [B, N, F, T]
  attention_probs = tf.nn.softmax(attention_scores)

  # This is actually dropping out entire tokens to attend to, which might
  # seem a bit unusual, but is taken from the original Transformer paper.
  attention_probs = dropout(attention_probs, attention_probs_dropout_prob)

  # `context_layer` = [B, F, N, H]
  context_layer = tf.einsum("BNFT,BTNH->BFNH", attention_probs, value_layer)

  return context_layer
def transformer_model(input_tensor,
                      attention_mask=None,
                      hidden_size=768,
                      num_hidden_layers=12,
                      num_attention_heads=12,
                      intermediate_size=3072,
                      intermediate_act_fn=gelu,
                      hidden_dropout_prob=0.1,
                      attention_probs_dropout_prob=0.1,
                      initializer_range=0.02,
                      do_return_all_layers=False):
  """Multi-headed, multi-layer Transformer from "Attention is All You Need".

  This is almost an exact implementation of the original Transformer encoder.

  See the original paper:
  https://arxiv.org/abs/1706.03762

  Also see:
  https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
    attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
      seq_length], with 1 for positions that can be attended to and 0 in
      positions that should not be.
    hidden_size: int. Hidden size of the Transformer.
    num_hidden_layers: int. Number of layers (blocks) in the Transformer.
    num_attention_heads: int. Number of attention heads in the Transformer.
    intermediate_size: int. The size of the "intermediate" (a.k.a., feed
      forward) layer.
    intermediate_act_fn: function. The non-linear activation function to apply
      to the output of the intermediate/feed-forward layer.
    hidden_dropout_prob: float. Dropout probability for the hidden layers.
    attention_probs_dropout_prob: float. Dropout probability of the attention
      probabilities.
    initializer_range: float. Range of the initializer (stddev of truncated
      normal).
    do_return_all_layers: Whether to also return all layers or just the final
      layer.

  Returns:
    float Tensor of shape [batch_size, seq_length, hidden_size], the final
    hidden layer of the Transformer.

  Raises:
    ValueError: A Tensor shape or parameter is invalid.
  """
  if hidden_size % num_attention_heads != 0:
    raise ValueError(
        "The hidden size (%d) is not a multiple of the number of attention "
        "heads (%d)" % (hidden_size, num_attention_heads))

  attention_head_size = int(hidden_size / num_attention_heads)
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  input_width = input_shape[2]

  # The Transformer performs sum residuals on all layers so the input needs
  # to be the same as the hidden size.
  if input_width != hidden_size:
    raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
                     (input_width, hidden_size))

  prev_output = input_tensor
  all_layer_outputs = []
  for layer_idx in range(num_hidden_layers):
    # Variable-scope names ("layer_N/attention/self", etc.) must be stable for
    # BERT checkpoint compatibility.
    with tf.variable_scope("layer_%d" % layer_idx):
      layer_input = prev_output

      with tf.variable_scope("attention"):
        with tf.variable_scope("self"):
          attention_output = attention_layer(
              from_tensor=layer_input,
              to_tensor=layer_input,
              attention_mask=attention_mask,
              num_attention_heads=num_attention_heads,
              size_per_head=attention_head_size,
              attention_probs_dropout_prob=attention_probs_dropout_prob,
              initializer_range=initializer_range)

        # Run a linear projection of `hidden_size` then add a residual
        # with `layer_input`.
        with tf.variable_scope("output"):
          attention_output = dense_layer_3d_proj(
              attention_output, hidden_size,
              num_attention_heads, attention_head_size,
              create_initializer(initializer_range), None, "dense")
          attention_output = dropout(attention_output, hidden_dropout_prob)
          attention_output = layer_norm(attention_output + layer_input)

      # The activation is only applied to the "intermediate" hidden layer.
      with tf.variable_scope("intermediate"):
        intermediate_output = dense_layer_2d(
            attention_output, intermediate_size,
            create_initializer(initializer_range), intermediate_act_fn, "dense")

      # Down-project back to `hidden_size` then add the residual.
      with tf.variable_scope("output"):
        layer_output = dense_layer_2d(intermediate_output, hidden_size,
                                      create_initializer(initializer_range),
                                      None, "dense")
        layer_output = dropout(layer_output, hidden_dropout_prob)
        layer_output = layer_norm(layer_output + attention_output)
        prev_output = layer_output
        all_layer_outputs.append(layer_output)

  if do_return_all_layers:
    return all_layer_outputs
  else:
    return all_layer_outputs[-1]
def get_shape_list(tensor, expected_rank=None, name=None):
  """Returns a list of the shape of tensor, preferring static dimensions.

  Args:
    tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, and exception will be
      thrown.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions of the shape of tensor. All static dimensions will
    be returned as python integers, and dynamic dimensions will be returned
    as tf.Tensor scalars.
  """
  if name is None:
    name = tensor.name

  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)

  shape = tensor.shape.as_list()

  # Dimensions that are statically unknown (None) must be filled in from the
  # dynamic shape tensor.
  dynamic_indexes = [index for index, dim in enumerate(shape) if dim is None]
  if not dynamic_indexes:
    return shape

  dynamic_shape = tf.shape(tensor)
  for index in dynamic_indexes:
    shape[index] = dynamic_shape[index]
  return shape
def reshape_to_matrix(input_tensor):
  """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
  rank = input_tensor.shape.ndims
  if rank < 2:
    raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                     (input_tensor.shape))
  if rank == 2:
    # Already a matrix; nothing to do.
    return input_tensor

  last_dim = input_tensor.shape[-1]
  return tf.reshape(input_tensor, [-1, last_dim])
def reshape_from_matrix(output_tensor, orig_shape_list):
  """Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
  if len(orig_shape_list) == 2:
    # The original tensor was already a matrix; return unchanged.
    return output_tensor

  # Keep all leading dimensions from the original shape, but use the matrix's
  # (possibly transformed) trailing width.
  width = get_shape_list(output_tensor)[-1]
  leading_dims = orig_shape_list[0:-1]
  return tf.reshape(output_tensor, leading_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
  """Raise ValueError unless `tensor`'s rank is among the expected ranks.

  Args:
    tensor: A tf.Tensor to check the rank of.
    expected_rank: Python integer, or list of integers, of acceptable ranks.
    name: Optional name of the tensor for the error message.

  Raises:
    ValueError: If the actual rank does not match any expected rank.
  """
  if name is None:
    name = tensor.name
  # Normalise the accepted ranks into a membership dict.
  if isinstance(expected_rank, six.integer_types):
    allowed_ranks = {expected_rank: True}
  else:
    allowed_ranks = {rank: True for rank in expected_rank}
  actual_rank = tensor.shape.ndims
  if actual_rank not in allowed_ranks:
    scope_name = tf.get_variable_scope().name
    raise ValueError(
        "For the tensor `%s` in scope `%s`, the actual rank "
        "`%d` (shape = %s) is not equal to the expected rank `%s`" %
        (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
| google-research-datasets/tydiqa | baseline/bert/modeling.py | Python | apache-2.0 | 39,067 | [
"Gaussian"
] | be0915a4a4adbdcc3bc6a129fc6b8a976b8c10c56ebf181a19d47ca6b4a3b185 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import numpy as np
from pyspark import SparkContext, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc
from pyspark.mllib.linalg import Vectors, SparseVector, _convert_to_vector
from pyspark.sql import DataFrame
class MLUtils(object):
    """
    Helper methods to load, save and pre-process data used in MLlib.
    .. versionadded:: 1.0.0
    """
    @staticmethod
    def _parse_libsvm_line(line):
        """
        Parses a line in LIBSVM format into (label, indices, values).
        """
        # Split on arbitrary whitespace: token 0 is the label, the rest
        # are "index:value" pairs.
        items = line.split(None)
        label = float(items[0])
        nnz = len(items) - 1
        indices = np.zeros(nnz, dtype=np.int32)
        values = np.zeros(nnz)
        for i in range(nnz):
            index, value = items[1 + i].split(":")
            # LIBSVM indices are one-based on disk; store them zero-based.
            indices[i] = int(index) - 1
            values[i] = float(value)
        return label, indices, values
    @staticmethod
    def _convert_labeled_point_to_libsvm(p):
        """Converts a LabeledPoint to a string in LIBSVM format."""
        from pyspark.mllib.regression import LabeledPoint
        assert isinstance(p, LabeledPoint)
        items = [str(p.label)]
        v = _convert_to_vector(p.features)
        # Emit one-based indices regardless of sparse/dense input.
        if isinstance(v, SparseVector):
            nnz = len(v.indices)
            for i in range(nnz):
                items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
        else:
            for i in range(len(v)):
                items.append(str(i + 1) + ":" + str(v[i]))
        return " ".join(items)
    @staticmethod
    def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None):
        """
        Loads labeled data in the LIBSVM format into an RDD of
        LabeledPoint. The LIBSVM format is a text-based format used by
        LIBSVM and LIBLINEAR. Each line represents a labeled sparse
        feature vector using the following format:
        label index1:value1 index2:value2 ...
        where the indices are one-based and in ascending order. This
        method parses each line into a LabeledPoint, where the feature
        indices are converted to zero-based.
        .. versionadded:: 1.0.0
        Parameters
        ----------
        sc : :py:class:`pyspark.SparkContext`
            Spark context
        path : str
            file or directory path in any Hadoop-supported file system URI
        numFeatures : int, optional
            number of features, which will be determined
            from the input data if a nonpositive value
            is given. This is useful when the dataset is
            already split into multiple files and you
            want to load them separately, because some
            features may not present in certain files,
            which leads to inconsistent feature
            dimensions.
        minPartitions : int, optional
            min number of partitions
        Returns
        -------
        :py:class:`pyspark.RDD`
            labeled data stored as an RDD of LabeledPoint
        Examples
        --------
        >>> from tempfile import NamedTemporaryFile
        >>> from pyspark.mllib.util import MLUtils
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
        >>> tempFile.flush()
        >>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
        >>> tempFile.close()
        >>> examples[0]
        LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
        >>> examples[1]
        LabeledPoint(-1.0, (6,[],[]))
        >>> examples[2]
        LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
        """
        from pyspark.mllib.regression import LabeledPoint
        lines = sc.textFile(path, minPartitions)
        parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l))
        if numFeatures <= 0:
            # Cache: the RDD is scanned once here to infer the feature
            # dimension and again below to build the LabeledPoints.
            parsed.cache()
            numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1
        return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2])))
    @staticmethod
    def saveAsLibSVMFile(data, dir):
        """
        Save labeled data in LIBSVM format.
        .. versionadded:: 1.0.0
        Parameters
        ----------
        data : :py:class:`pyspark.RDD`
            an RDD of LabeledPoint to be saved
        dir : str
            directory to save the data
        Examples
        --------
        >>> from tempfile import NamedTemporaryFile
        >>> from fileinput import input
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from glob import glob
        >>> from pyspark.mllib.util import MLUtils
        >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
        ...             LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
        >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
        '0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
        """
        lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))
        lines.saveAsTextFile(dir)
    @staticmethod
    def loadLabeledPoints(sc, path, minPartitions=None):
        """
        Load labeled points saved using RDD.saveAsTextFile.
        .. versionadded:: 1.0.0
        Parameters
        ----------
        sc : :py:class:`pyspark.SparkContext`
            Spark context
        path : str
            file or directory path in any Hadoop-supported file system URI
        minPartitions : int, optional
            min number of partitions
        Returns
        -------
        :py:class:`pyspark.RDD`
            labeled data stored as an RDD of LabeledPoint
        Examples
        --------
        >>> from tempfile import NamedTemporaryFile
        >>> from pyspark.mllib.util import MLUtils
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
        ...             LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
        >>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
        [LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
        """
        minPartitions = minPartitions or min(sc.defaultParallelism, 2)
        # Parsing happens on the Scala side via the Py4J bridge.
        return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions)
    @staticmethod
    @since("1.5.0")
    def appendBias(data):
        """
        Returns a new vector with `1.0` (bias) appended to
        the end of the input vector.
        """
        vec = _convert_to_vector(data)
        if isinstance(vec, SparseVector):
            # Bias goes in a new trailing slot, so size grows by one.
            newIndices = np.append(vec.indices, len(vec))
            newValues = np.append(vec.values, 1.0)
            return SparseVector(len(vec) + 1, newIndices, newValues)
        else:
            return _convert_to_vector(np.append(vec.toArray(), 1.0))
    @staticmethod
    @since("1.5.0")
    def loadVectors(sc, path):
        """
        Loads vectors saved using `RDD[Vector].saveAsTextFile`
        with the default number of partitions.
        """
        return callMLlibFunc("loadVectors", sc, path)
    @staticmethod
    def convertVectorColumnsToML(dataset, *cols):
        """
        Converts vector columns in an input DataFrame from the
        :py:class:`pyspark.mllib.linalg.Vector` type to the new
        :py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
        package.
        .. versionadded:: 2.0.0
        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            input dataset
        \\*cols : str
            Vector columns to be converted.
            New vector columns will be ignored. If unspecified, all old
            vector columns will be converted excepted nested ones.
        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            the input dataset with old vector columns converted to the
            new vector type
        Examples
        --------
        >>> import pyspark
        >>> from pyspark.mllib.linalg import Vectors
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
        ...     ["id", "x", "y"])
        >>> r1 = MLUtils.convertVectorColumnsToML(df).first()
        >>> isinstance(r1.x, pyspark.ml.linalg.SparseVector)
        True
        >>> isinstance(r1.y, pyspark.ml.linalg.DenseVector)
        True
        >>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first()
        >>> isinstance(r2.x, pyspark.ml.linalg.SparseVector)
        True
        >>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertVectorColumnsToML", dataset, list(cols))
    @staticmethod
    def convertVectorColumnsFromML(dataset, *cols):
        """
        Converts vector columns in an input DataFrame to the
        :py:class:`pyspark.mllib.linalg.Vector` type from the new
        :py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
        package.
        .. versionadded:: 2.0.0
        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            input dataset
        \\*cols : str
            Vector columns to be converted.
            Old vector columns will be ignored. If unspecified, all new
            vector columns will be converted except nested ones.
        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            the input dataset with new vector columns converted to the
            old vector type
        Examples
        --------
        >>> import pyspark
        >>> from pyspark.ml.linalg import Vectors
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
        ...     ["id", "x", "y"])
        >>> r1 = MLUtils.convertVectorColumnsFromML(df).first()
        >>> isinstance(r1.x, pyspark.mllib.linalg.SparseVector)
        True
        >>> isinstance(r1.y, pyspark.mllib.linalg.DenseVector)
        True
        >>> r2 = MLUtils.convertVectorColumnsFromML(df, "x").first()
        >>> isinstance(r2.x, pyspark.mllib.linalg.SparseVector)
        True
        >>> isinstance(r2.y, pyspark.ml.linalg.DenseVector)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertVectorColumnsFromML", dataset, list(cols))
    @staticmethod
    def convertMatrixColumnsToML(dataset, *cols):
        """
        Converts matrix columns in an input DataFrame from the
        :py:class:`pyspark.mllib.linalg.Matrix` type to the new
        :py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
        package.
        .. versionadded:: 2.0.0
        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            input dataset
        \\*cols : str
            Matrix columns to be converted.
            New matrix columns will be ignored. If unspecified, all old
            matrix columns will be converted excepted nested ones.
        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            the input dataset with old matrix columns converted to the
            new matrix type
        Examples
        --------
        >>> import pyspark
        >>> from pyspark.mllib.linalg import Matrices
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
        ...     Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
        >>> r1 = MLUtils.convertMatrixColumnsToML(df).first()
        >>> isinstance(r1.x, pyspark.ml.linalg.SparseMatrix)
        True
        >>> isinstance(r1.y, pyspark.ml.linalg.DenseMatrix)
        True
        >>> r2 = MLUtils.convertMatrixColumnsToML(df, "x").first()
        >>> isinstance(r2.x, pyspark.ml.linalg.SparseMatrix)
        True
        >>> isinstance(r2.y, pyspark.mllib.linalg.DenseMatrix)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertMatrixColumnsToML", dataset, list(cols))
    @staticmethod
    def convertMatrixColumnsFromML(dataset, *cols):
        """
        Converts matrix columns in an input DataFrame to the
        :py:class:`pyspark.mllib.linalg.Matrix` type from the new
        :py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
        package.
        .. versionadded:: 2.0.0
        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            input dataset
        \\*cols : str
            Matrix columns to be converted.
            Old matrix columns will be ignored. If unspecified, all new
            matrix columns will be converted except nested ones.
        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            the input dataset with new matrix columns converted to the
            old matrix type
        Examples
        --------
        >>> import pyspark
        >>> from pyspark.ml.linalg import Matrices
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
        ...     Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
        >>> r1 = MLUtils.convertMatrixColumnsFromML(df).first()
        >>> isinstance(r1.x, pyspark.mllib.linalg.SparseMatrix)
        True
        >>> isinstance(r1.y, pyspark.mllib.linalg.DenseMatrix)
        True
        >>> r2 = MLUtils.convertMatrixColumnsFromML(df, "x").first()
        >>> isinstance(r2.x, pyspark.mllib.linalg.SparseMatrix)
        True
        >>> isinstance(r2.y, pyspark.ml.linalg.DenseMatrix)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertMatrixColumnsFromML", dataset, list(cols))
class Saveable(object):
    """
    Mixin for models and transformers which may be saved as files.
    .. versionadded:: 1.3.0
    """
    def save(self, sc, path):
        """
        Persist this model under `path`.
        Two things are written:
        * human-readable (JSON) model metadata to path/metadata/
        * Parquet formatted data to path/data/
        A model saved this way can be restored with :py:meth:`Loader.load`.
        Parameters
        ----------
        sc : :py:class:`pyspark.SparkContext`
            Spark context used to save model data.
        path : str
            Directory to write the model into; an exception is raised
            if the directory already exists.
        """
        # Concrete subclasses must override this.
        raise NotImplementedError
@inherit_doc
class JavaSaveable(Saveable):
    """
    Mixin for models that provide save() through their Scala
    implementation.
    .. versionadded:: 1.3.0
    """
    @since("1.3.0")
    def save(self, sc, path):
        """Save this model to the given path."""
        # Validate argument types before crossing into the JVM.
        if not isinstance(sc, SparkContext):
            raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
        if not isinstance(path, str):
            raise TypeError("path should be a string, got type %s" % type(path))
        java_sc = sc._jsc.sc()
        self._java_model.save(java_sc, path)
class Loader(object):
    """
    Mixin for classes which can load saved models from files.
    .. versionadded:: 1.3.0
    """
    @classmethod
    def load(cls, sc, path):
        """
        Restore a model previously written with :py:meth:`Saveable.save`.
        Parameters
        ----------
        sc : :py:class:`pyspark.SparkContext`
            Spark context used for loading model files.
        path : str
            Directory the model was saved to.
        Returns
        -------
        object
            model instance
        """
        # Concrete subclasses must override this.
        raise NotImplementedError
@inherit_doc
class JavaLoader(Loader):
    """
    Mixin for classes which can load saved models using its Scala
    implementation.
    .. versionadded:: 1.3.0
    """
    @classmethod
    def _java_loader_class(cls):
        """
        Returns the full class name of the Java loader. The default
        implementation replaces "pyspark" by "org.apache.spark" in
        the Python full class name.
        """
        # e.g. pyspark.mllib.clustering.KMeansModel ->
        #      org.apache.spark.mllib.clustering.KMeansModel
        package = cls.__module__.replace("pyspark", "org.apache.spark")
        return package + "." + cls.__name__
    @classmethod
    def _load_java(cls, sc, path):
        """
        Load a Java model from the given path.
        """
        # Walk the JVM gateway attribute-by-attribute to reach the
        # Scala companion object, then call its load().
        java_obj = sc._jvm
        for piece in cls._java_loader_class().split("."):
            java_obj = getattr(java_obj, piece)
        return java_obj.load(sc._jsc.sc(), path)
    @classmethod
    @since("1.3.0")
    def load(cls, sc, path):
        """Load a model from the given path."""
        return cls(cls._load_java(sc, path))
class LinearDataGenerator(object):
    """Utils for generating linear data.
    .. versionadded:: 1.5.0
    """
    @staticmethod
    def generateLinearInput(intercept, weights, xMean, xVariance,
                            nPoints, seed, eps):
        """
        .. versionadded:: 1.5.0
        Parameters
        ----------
        intercept : float
            bias factor, the term c in X'w + c
        weights : :py:class:`pyspark.mllib.linalg.Vector` or convertible
            feature vector, the term w in X'w + c
        xMean : :py:class:`pyspark.mllib.linalg.Vector` or convertible
            Point around which the data X is centered.
        xVariance : :py:class:`pyspark.mllib.linalg.Vector` or convertible
            Variance of the given data
        nPoints : int
            Number of points to be generated
        seed : int
            Random Seed
        eps : float
            Used to scale the noise. If eps is set high,
            the amount of gaussian noise added is more.
        Returns
        -------
        list
            of :py:class:`pyspark.mllib.regression.LabeledPoints` of length nPoints
        """
        # Coerce everything to plain Python floats before crossing the
        # Py4J boundary.
        weights = list(map(float, weights))
        xMean = list(map(float, xMean))
        xVariance = list(map(float, xVariance))
        generated = callMLlibFunc(
            "generateLinearInputWrapper", float(intercept), weights, xMean,
            xVariance, int(nPoints), int(seed), float(eps))
        return list(generated)
    @staticmethod
    @since("1.5.0")
    def generateLinearRDD(sc, nexamples, nfeatures, eps,
                          nParts=2, intercept=0.0):
        """
        Generate an RDD of LabeledPoints.
        """
        return callMLlibFunc(
            "generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures),
            float(eps), int(nParts), float(intercept))
def _test():
    # Run this module's doctests against a throwaway local SparkSession.
    import doctest
    from pyspark.sql import SparkSession
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("mllib.util tests")\
        .getOrCreate()
    globs['spark'] = spark
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    # Non-zero exit status so CI notices doctest failures.
    if failure_count:
        sys.exit(-1)
# Run the doctest suite when executed directly.
if __name__ == "__main__":
    _test()
| maropu/spark | python/pyspark/mllib/util.py | Python | apache-2.0 | 21,149 | [
"Gaussian"
] | 8c829af178cf48ed035ba52aef6f141af10573c1967c9f4c111f4a95a28b6e38 |
#!/usr/bin/env python
import re
import sys
import time
from Bio import SeqIO
from StringIO import StringIO
from Bio import AlignIO
import os
import argparse
from __init__ import __version__
"""
Transcriptome Annotation by Modular Algorithms (TAMA)
TAMA Collapse
Author: Richard I. Kuo
This script collapses transcripts and groups transcripts into genes for long reads mapped onto a genome assembly.
"""
tc_version = 'tc0.0'
tc_date = 'tc_version_date_2020_12_14'
### Notes on changes
# Fixed issue with coordinates of soft clipped variants in the variant output file.
#####################################################################
#####################################################################
# ---------------------------------------------------------------------------
# Command-line argument parsing and option normalisation.
# Each optional flag falls back to a documented default; invalid values for
# enumerated flags terminate the run.
# ---------------------------------------------------------------------------
ap = argparse.ArgumentParser(description='This script collapses mapped transcript models')
ap.add_argument('-s', type=str, nargs=1, help='Sorted sam file (required)')
ap.add_argument('-f', type=str, nargs=1, help='Genome fasta file (required)')
ap.add_argument('-p', type=str, nargs=1, help='Output prefix (required)')
ap.add_argument('-x', type=str, nargs=1, help='Capped flag: capped or no_cap')
ap.add_argument('-e', type=str, nargs=1, help='Collapse exon ends flag: common_ends or longest_ends (default common_ends)')
ap.add_argument('-c', type=str, nargs=1, help='Coverage (default 99)')
ap.add_argument('-i', type=str, nargs=1, help='Identity (default 85)')
ap.add_argument('-icm', type=str, nargs=1, help='Identity calculation method (default ident_cov for including coverage) (alternate is ident_map for excluding hard and soft clipping)')
ap.add_argument('-a', type=str, nargs=1, help='5 prime threshold (default 10)')
ap.add_argument('-m', type=str, nargs=1, help='Exon/Splice junction threshold (default 10)')
ap.add_argument('-z', type=str, nargs=1, help='3 prime threshold (default 10)')
ap.add_argument('-d', type=str, nargs=1, help='Flag for merging duplicate transcript groups (default is merge_dup will merge duplicates ,no_merge quits when duplicates are found)')
ap.add_argument('-sj', type=str, nargs=1, help='Use error threshold to prioritize the use of splice junction information from collapsing transcripts(default no_priority, activate with sj_priority)')
ap.add_argument('-sjt', type=str, nargs=1, help='Threshold for detecting errors near splice junctions (default is 10bp)')
ap.add_argument('-lde', type=str, nargs=1, help='Threshold for amount of local density error near splice junctions that is allowed (default is 1000 errors which practically means no threshold is applied)')
ap.add_argument('-ses', type=str, nargs=1, help='Simple error symbol. Use this to pick the symbol used to represent matches in the simple error string for LDE output.')
ap.add_argument('-b', type=str, nargs=1, help='Use BAM instead of SAM')
ap.add_argument('-log', type=str, nargs=1, help='Turns off log output to screen of collapsing process. (default on, use log_off to turn off)')
ap.add_argument('-v', type=str, nargs=1, help='Prints out version date and exits.')
ap.add_argument('-rm', type=str, nargs=1, help='Run mode allows you to use original or low_mem mode, default is original')
ap.add_argument('-vc', type=str, nargs=1, help='Variation covwerage threshold: Default 5 reads')
opts = ap.parse_args()

# Check for version request: -v prints the version date and exits.
if not opts.v:
    print(tc_date)
else:
    print(tc_date)
    print("Program did not run")
    sys.exit()

# Check for missing required args; flag them all before aborting so the
# user sees every problem at once.
missing_arg_flag = 0
if not opts.s:
    print("Sam file is missing")
    missing_arg_flag = 1
if not opts.f:
    print("Fasta file missing")
    missing_arg_flag = 1
if not opts.p:
    print("Output prefix name missing")
    missing_arg_flag = 1

if not opts.x:
    print("Default capped flag will be used: Capped")
    fiveprime_cap_flag = "capped"
else:
    fiveprime_cap_flag = opts.x[0]
    if fiveprime_cap_flag != "no_cap" and fiveprime_cap_flag != "capped":
        print("Error with cap flag. Should be capped or no_cap.")
        sys.exit()

if not opts.e:
    print("Default collapse exon ends flag will be used: common_ends")
    collapse_flag = "common_ends"
else:
    collapse_flag = opts.e[0]

if not opts.c:
    print("Default coverage: 99")
    coverage_threshold = 99.0
else:
    coverage_threshold = float(opts.c[0])

if not opts.i:
    print("Default identity: 85")
    identity_threshold = 85.0
else:
    identity_threshold = float(opts.i[0])

if not opts.icm:
    print("Default identity calculation method: ident_cov")
    ident_calc_method = 'ident_cov'
else:
    ident_calc_method = str(opts.icm[0])
    if ident_calc_method != "ident_cov" and ident_calc_method != "ident_map":
        print("Error with -icm input. Should be ident_cov or ident_map. Run terminated.")
        print(ident_calc_method)
        sys.exit()

if not opts.a:
    print("Default 5 prime threshold: 10")
    fiveprime_threshold = 10
else:
    fiveprime_threshold = int(opts.a[0])

if not opts.m:
    print("Default exon/splice junction threshold: 10")
    exon_diff_threshold = 10
else:
    exon_diff_threshold = int(opts.m[0])

if not opts.z:
    print("Default 3 prime threshold: 10")
    threeprime_threshold = 10
else:
    threeprime_threshold = int(opts.z[0])

if not opts.d:
    print("Default duplicate merge flag: merge_dup")
    duplicate_flag = "merge_dup"
else:
    duplicate_flag = str(opts.d[0])

if not opts.sj:
    print("Default splice junction priority: no_priority")
    sj_priority_flag = "no_priority"
else:
    sj_priority_flag = str(opts.sj[0])

if not opts.sjt:
    print("Default splice junction error threshold: 10")
    sj_err_threshold = 10
else:
    sj_err_threshold = int(opts.sjt[0])

if not opts.lde:
    print("Default splice junction local density error threshold: 1000")
    lde_threshold = 1000
else:
    lde_threshold = int(opts.lde[0])

if not opts.ses:
    print("Default simple error symbol for matches is the underscore \"_\" .")
    ses_match_char = "_"
else:
    # BUG FIX: previously int(opts.ses[0]) — the error symbol is a display
    # character (default "_"), so int() raised ValueError for any symbol.
    ses_match_char = str(opts.ses[0])

if not opts.b:
    print("Using SAM format for reading in.")
    bam_flag = "SAM"
else:
    print("Using BAM format for reading in.")
    import pysam
    bam_flag = str(opts.b[0])

if not opts.log:
    print("Default log output on")
    log_flag = "log_on"
else:
    log_flag = str(opts.log[0])
    if log_flag != "log_off":
        print("Please use log_off to turn off log prints to screen")
        sys.exit()

if not opts.rm:
    print("Default run mode original")
    run_mode_flag = "original"
else:
    run_mode_flag = str(opts.rm[0])
    if run_mode_flag != "original" and run_mode_flag != "low_mem":
        print("Please use original or low_mem for -rm setting")
        sys.exit()

if not opts.vc:
    print("Default 5 read threshold")
    var_support_threshold = 5
else:
    var_support_threshold = int(opts.vc[0])

if missing_arg_flag == 1:
    print("Please try again with complete arguments")
    # BUG FIX: previously fell through and crashed with a TypeError on
    # opts.s[0] below; abort cleanly instead.
    sys.exit()

sam_file = opts.s[0]
fasta_file_name = opts.f[0]
outfile_prefix = opts.p[0]

# Sanity-check that the input file extension matches the declared format.
input_sambam_flag = "na"
if sam_file.endswith("bam"):
    input_sambam_flag = "BAM"
    if bam_flag == "SAM":
        print("You designated SAM input but are supplying BAM. Please use the same format for input as specified.")
        sys.exit()
if sam_file.endswith("sam"):
    input_sambam_flag = "SAM"
    if bam_flag == "BAM":
        print("You designated BAM input but are supplying SAM. Please use the same format for input as specified.")
        sys.exit()
if input_sambam_flag == "na":
    print("Input SAM/BAM file not recoginized from extension format designation.")
#####################################################################
#####################################################################
# NOTE(review): leading indentation in this section was lost upstream; the
# nesting under the run-mode check below was reconstructed — confirm that
# only the variants/varcov outputs are restricted to "original" run mode.
start_time = time.time()
prev_time = start_time
#print("opening sam file")
#sam_file = sys.argv[1]
#sam_file_contents = open(sam_file).read().rstrip("\n").split("\n")
#print("opening fasta file")
#fasta_file_name = sys.argv[2]
#outfile_prefix = sys.argv[3]
#fiveprime_cap_flag = "capped"
#collapse_flag = "common_ends"
#default threshold, will add option to change via arguments
#coverage_threshold = 99.0
#identity_threshold = 85.0
#default poly A threshold
a_window = 20
a_perc_thresh = 70.0
no_mismatch_flag = "0" # use this for showing no mismatch near splice junction
# see calc_error_rate and sj_error_priority_start and sj_error_priority_end
# Collapsed transcript models in BED12 format.
bed_outfile_name = outfile_prefix + ".bed"
outfile_bed = open(bed_outfile_name,"w")
# Per-read mapping/acceptance report.
cluster_outfile_name = outfile_prefix + "_read.txt"
outfile_cluster = open(cluster_outfile_name,"w")
cluster_line = "\t".join(["read_id","mapped_flag","accept_flag","percent_coverage","percent_identity","error_line<h;s;i;d;m>", "length", "cigar"])
outfile_cluster.write(cluster_line)
outfile_cluster.write("\n")
# Per-transcript collapse summary.
trans_report_outfile_name = outfile_prefix + "_trans_report.txt"
outfile_trans_report = open(trans_report_outfile_name,"w")
trans_report_line = "\t".join(["transcript_id","num_clusters","high_coverage","low_coverage","high_quality_percent","low_quality_percent","start_wobble_list","end_wobble_list","collapse_sj_start_err","collapse_sj_end_err","collapse_error_nuc"])
#trans_report_line = "\t".join(["transcript_id","num_clusters","high_coverage","low_coverage","high_quality_percent","low_quality_percent","start_wobble_list","end_wobble_list","collapse_sj_start_err","collapse_sj_end_err","collapse_error_nuc","sj_error_simple"])
outfile_trans_report.write(trans_report_line)
outfile_trans_report.write("\n")
# Transcript-to-read assignment in BED format (no header line).
trans_clust_outfile_name = outfile_prefix + "_trans_read.bed"
outfile_trans_clust_report = open(trans_clust_outfile_name,"w")
#trans_clust_line = "\t".join(["transcript_id","cluster_id","scaffold","strand","start","end","exon_starts","exon_ends"])
#outfile_trans_clust_report.write(trans_clust_line)
#outfile_trans_clust_report.write("\n")
# Variant calling outputs are only produced in "original" run mode.
if run_mode_flag == "original":
    variant_outfile_name = outfile_prefix + "_variants.txt"
    outfile_variant = open(variant_outfile_name,"w")
    variant_file_line = "\t".join(["scaffold","position","type","ref_allele","alt_allele","count","cov_count","cluster_list"])
    outfile_variant.write(variant_file_line)
    outfile_variant.write("\n")
    varcov_outfile_name = outfile_prefix + "_varcov.txt"
    outfile_varcov = open(varcov_outfile_name,"w")
    varcov_file_line = "\t".join(["positions","overlap_clusters"])
    outfile_varcov.write(varcov_file_line)
    outfile_varcov.write("\n")
# Poly-A tail detection report.
polya_outfile_name = outfile_prefix + "_polya.txt"
outfile_polya = open(polya_outfile_name,"w")
polya_file_line = "\t".join(["cluster_id","trans_id","strand","a_percent","a_count","sequence"])
outfile_polya.write(polya_file_line)
outfile_polya.write("\n")
#rtswitch_outfile_name = outfile_prefix + "_rtswitch.txt"
#outfile_rtswitch = open(rtswitch_outfile_name,"w")
#rtswitch_file_line = "\t".join(["trans_id","junct_num","first_seq","second_seq","rev_comp_first_seq"])
#outfile_rtswitch.write(rtswitch_file_line)
#outfile_rtswitch.write("\n")
# Strand-consistency report.
strand_outfile_name = outfile_prefix + "_strand_check.txt"
outfile_strand = open(strand_outfile_name,"w")
strand_file_line = "\t".join(["read_id","scaff_name","start_pos","cigar","strands"])
outfile_strand.write(strand_file_line)
outfile_strand.write("\n")
# Local density error (errors near splice junctions) report.
lde_outfile_name = outfile_prefix + "_local_density_error.txt"
outfile_lde = open(lde_outfile_name,"w")
lde_file_line = "\t".join(["cluster_id","lde_flag","scaff_name","start_pos","end_pos","strand","num_exons","bad_sj_num_line","bad_sj_error_count_line","sj_error_profile_idmsh","sj_error_nuc","sj_error_simple","cigar"])
outfile_lde.write(lde_file_line)
outfile_lde.write("\n")
variation_dict = {} # variation_dict[scaffold][position][variant type][alt allele][cluster id] = 1
var_coverage_dict = {} # var_coverage_dict[scaffold][position][trans_id] = 1
## sj hash 2020/07/27
sj_hash_read_threshold = 20
check_trans_id = '11_c110717/1/696' #########################################################################debugging
def track_time(start_time, prev_time):
    """Print elapsed time (H:M:S) since prev_time and since start_time.

    Returns the current time so callers can use it as the next prev_time.
    """
    def _hms(total_seconds):
        # why divmod: the old code derived minutes/seconds from the FULL
        # total instead of the per-hour remainder, so any elapsed time of
        # an hour or more printed wrong minutes and seconds. Integer //
        # via divmod also keeps the output correct under Python 3.
        hours, remainder = divmod(int(total_seconds), 3600)
        minutes, seconds = divmod(remainder, 60)
        return hours, minutes, seconds

    end_time = time.time()
    tt_hours, tt_minutes, tt_seconds = _hms(end_time - prev_time)
    print("time taken since last check:\t" + str(tt_hours) + ":" + str(tt_minutes) + ":" + str(tt_seconds) )
    tt_hours, tt_minutes, tt_seconds = _hms(end_time - start_time)
    print("time taken since beginning:\t" + str(tt_hours) + ":" + str(tt_minutes) + ":" + str(tt_seconds) )
    this_time = end_time
    return this_time
# Split a CIGAR string into parallel lists of lengths and operation chars.
def cigar_list(cigar):
    """Return ([lengths], [op chars]) parsed from a CIGAR string."""
    # minimap2 SAM output may use '='/'X' in place of 'M'; normalise first.
    normalized = cigar.replace('=', 'M').replace('X', 'M')
    # Blank out digits to isolate the operation characters, and vice versa.
    op_chars = re.sub('\d', ' ', normalized).split()
    op_lengths = re.sub("[a-zA-Z]+", ' ', normalized).split()
    return (op_lengths, op_chars)
####################################################################################################
# Get mapped sequence length (reference positions consumed by the alignment).
def mapped_seq_length(cigar):
    """Return the number of reference bases covered by aligned CIGAR ops.

    Only M (alignment match/mismatch) and D (deletion from the read)
    consume mapped length; H/S clipping, I insertions, and N intron skips
    do not.
    """
    cig_dig_list, cig_char_list = cigar_list(cigar)
    map_seq_length = 0
    # Paired iteration replaces the old xrange index loop: identical result
    # on Python 2 and also valid on Python 3.
    for length, op in zip(cig_dig_list, cig_char_list):
        if op == "M" or op == "D":
            map_seq_length = map_seq_length + int(length)
    return map_seq_length
####################################################################################################
# Get the coordinate for the end of the transcript plus per-exon boundaries.
def trans_coordinates(start_pos, cigar):
    """Walk the CIGAR to find the alignment end and exon start/end lists.

    M and D advance along the reference; each N (intron skip) closes the
    current exon and opens the next one. H/S/I consume no reference bases.
    Returns (end_pos, exon_start_list, exon_end_list).
    """
    cig_dig_list, cig_char_list = cigar_list(cigar)
    end_pos = int(start_pos)
    exon_start_list = [int(start_pos)]
    exon_end_list = []
    # Paired iteration replaces the old xrange index loop: identical result
    # on Python 2 and also valid on Python 3.
    for length, op in zip(cig_dig_list, cig_char_list):
        if op == "M" or op == "D":
            end_pos = end_pos + int(length)
        elif op == "N":
            # Close the exon BEFORE advancing past the intron, then record
            # the start of the next exon.
            exon_end_list.append(end_pos)
            end_pos = end_pos + int(length)
            exon_start_list.append(end_pos)
    # Close the final exon.
    exon_end_list.append(end_pos)
    return end_pos, exon_start_list, exon_end_list
####################################################################################################
#deal with wildcards
#deal with wildcards
# IUPAC nucleotide ambiguity codes -> the set of concrete bases each code can
# stand for, stored as nested dicts (nuc_char_dict[code][base] = 1) so that
# mismatch_seq() can iterate the possible bases of either sequence.
# NOTE(review): the previous hand-written table assigned every
# non-self-complementary code (K, M, R, Y, B, D, H, V) the COMPLEMENT of its
# IUPAC base set (e.g. K was A,C instead of G,T).  Corrected here to the
# standard IUPAC definitions.
_IUPAC_BASE_SETS = {
    "A": "A",
    "C": "C",
    "G": "G",
    "T": "T",
    "R": "AG",    # puRine
    "Y": "CT",    # pYrimidine
    "S": "CG",    # Strong (3 H-bonds)
    "W": "AT",    # Weak (2 H-bonds)
    "K": "GT",    # Keto
    "M": "AC",    # aMino
    "B": "CGT",   # not A
    "D": "AGT",   # not C
    "H": "ACT",   # not G
    "V": "ACG",   # not T
    "N": "ACGT",  # aNy base
}
nuc_char_dict = {}  # nuc_char_dict[nuc char][nuc single] = 1
for _code, _bases in _IUPAC_BASE_SETS.items():
    nuc_char_dict[_code] = {}
    for _base in _bases:
        nuc_char_dict[_code][_base] = 1
#use this to find mismatch between two aligned sequences
def mismatch_seq(genome_seq, query_seq, genome_pos, seq_pos):
    """Find mismatching positions between two aligned, equal-length sequences.

    Two (possibly ambiguous IUPAC) bases are considered matching when their
    base sets in nuc_char_dict intersect.

    Args:
        genome_seq: reference slice covering the aligned region.
        query_seq: read slice of the same length.
        genome_pos: genome coordinate of the first base of genome_seq.
        seq_pos: read coordinate of the first base of query_seq.
    Returns:
        (genome_mismatch_list, seq_mismatch_list, nuc_mismatch_list): genome
        and read coordinates of each mismatch, plus "<read>.<genome>" base
        strings (in the original, un-uppercased case).
    Exits:
        via sys.exit() after diagnostics if the lengths differ.
    """
    if len(genome_seq) != len(query_seq):
        print("Genome seq is not the same length as query seq")
        print(genome_seq)
        print(query_seq)
        print(genome_pos)
        print(seq_pos)
        sys.exit()
    genome_mismatch_list = []
    seq_mismatch_list = []
    nuc_mismatch_list = []
    # fixes the Python-2-only xrange loop
    for i, (genome_nuc, read_nuc) in enumerate(zip(genome_seq, query_seq)):
        # Only the read base is upper-cased here; the reference is presumably
        # already upper case -- TODO confirm against the fasta loading code.
        read_nuc = read_nuc.upper()
        # Wildcard-aware match: the two codes match when their possible base
        # sets intersect (replaces the original quadratic pairwise loop).
        if any(base in nuc_char_dict[read_nuc] for base in nuc_char_dict[genome_nuc]):
            continue
        genome_mismatch_list.append(genome_pos + i)
        seq_mismatch_list.append(seq_pos + i)
        nuc_mismatch_list.append(query_seq[i] + "." + genome_seq[i])
    return genome_mismatch_list, seq_mismatch_list, nuc_mismatch_list
####################################################################################################
def update_variation_dict(scaffold, var_pos, var_type, var_seq, read_id):
    """Record one observed variant and its read coverage.

    Mutates the module-level variation_dict
    (scaffold -> pos -> type -> seq -> read_id = 1) and var_coverage_dict
    (scaffold -> pos -> read_id = 1).
    """
    # variant sequences may arrive as a list of characters; normalize to str
    if type(var_seq) is list:
        var_seq = "".join(var_seq)
    # build the nested variant entry level by level
    scaffold_entry = variation_dict.setdefault(scaffold, {})
    pos_entry = scaffold_entry.setdefault(var_pos, {})
    type_entry = pos_entry.setdefault(var_type, {})
    seq_entry = type_entry.setdefault(var_seq, {})
    seq_entry[read_id] = 1
    # per-read coverage of each variant position
    cov_pos_entry = var_coverage_dict.setdefault(scaffold, {}).setdefault(var_pos, {})
    cov_pos_entry[read_id] = 1
#used to calculate error rate of mapping
def calc_error_rate(start_pos,cigar,seq_list,scaffold,read_id):
    """Walk one SAM alignment and collect its error statistics.

    For every CIGAR entry this tallies clip/indel/mismatch counts, records
    each observed variant via update_variation_dict(), and for every splice
    junction (N entry) builds compact strings describing the alignment
    errors within sj_err_threshold bases on either side of the junction.

    Parameters:
        start_pos: 1-based leftmost mapping position from the SAM line.
        cigar: CIGAR string for the alignment (assumed pre-normalized by
            cigar_list(), so '=' / 'X' appear as 'M').
        seq_list: the read sequence (indexable/sliceable by position).
        scaffold: reference scaffold name; key into module-level fasta_dict.
        read_id: read identifier, forwarded to update_variation_dict().

    Returns:
        h_count, s_count, i_count, d_count, mis_count, nomatch_dict,
        sj_pre_error_list, sj_post_error_list

    Uses module-level globals: fasta_dict, sj_err_threshold,
    no_mismatch_flag (plus variation_dict via update_variation_dict).
    """
    [cig_dig_list,cig_char_list] = cigar_list(cigar)
    # running per-operator totals
    h_count = 0    # hard-clipped bases
    s_count = 0    # soft-clipped bases
    i_count = 0    # inserted bases
    d_count = 0    # deleted bases
    mis_count = 0  # mismatched bases within M regions
    all_genome_mismatch_list = []
    all_nuc_mismatch_list = [] # this shows the nuc mismatch for the all_genome_mismatch_list
    insertion_list = []
    insertion_length_list = []
    deletion_list = []
    deletion_length_list = []
    sj_pre_error_list = []
    sj_post_error_list = []
    nomatch_dict = {} # nomatch_dict[non match id] = list
    # nomatch_dict stores live references to the lists above, so entries
    # appended during the walk below are visible through it as well
    nomatch_dict['mismatch_list'] = all_genome_mismatch_list
    nomatch_dict['insertion_list'] = insertion_list
    nomatch_dict['insertion_length_list'] = insertion_length_list
    nomatch_dict['deletion_list'] = deletion_list
    nomatch_dict['deletion_length_list'] = deletion_length_list
    #walk through both the query seq and the mapped seq
    genome_pos = start_pos - 1 #adjust for 1 base to 0 base coordinates
    seq_pos = 0
    for i in xrange(len(cig_dig_list)):
        cig_flag = cig_char_list[i]
        #print(seq_pos)
        if cig_flag == "H":
            h_count = h_count + int(cig_dig_list[i])
            ### for variation detection start
            # NOTE(review): var_pos uses the cumulative hard-clip total, not
            # just this entry's length -- confirm that is intended
            var_pos = genome_pos - h_count
            var_seq = "N" * h_count
            var_type = cig_flag
            update_variation_dict(scaffold,var_pos,var_type,var_seq,read_id)
            ### for variation detection end
            continue
        elif cig_flag == "S":
            s_count = s_count + int(cig_dig_list[i])
            ### for variation detection start
            seq_start = seq_pos
            seq_end = seq_pos + int(cig_dig_list[i])
            var_pos = genome_pos - s_count
            var_seq = seq_list[seq_start:seq_end]
            var_type = cig_flag
            ###### change made 2019/11/19
            #correct for soft clips at end of mapping
            num_cig_entities = len(cig_dig_list)
            if i == num_cig_entities - 1:
                var_pos = genome_pos + 1 - s_count
            ###### change made 2019/11/19
            update_variation_dict(scaffold,var_pos,var_type,var_seq,read_id)
            ### for variation detection end
            #adjust seq position to account for missing soft mask in genome coord
            seq_pos = seq_pos + int(cig_dig_list[i])
            continue
        elif cig_flag == "M":
            match_length = int(cig_dig_list[i])
            seq_start = seq_pos
            seq_end = seq_pos + match_length
            genome_start = genome_pos
            genome_end = genome_pos + match_length
            #seq_slice = seq_list[seq_start:seq_end+1]
            #genome_slice = fasta_dict[scaffold][genome_start:genome_end+1]
            seq_slice = seq_list[seq_start:seq_end]
            genome_slice = fasta_dict[scaffold][genome_start:genome_end]
            seq_diff = seq_end - seq_start    # unused; kept for parity with original
            gen_diff = genome_end - genome_start    # unused; kept for parity with original
            ######################################################
            #print(str(match_length)+cig_flag)
            ######################################################
            #get number and location of mis matches
            genome_mismatch_list,seq_mismatch_list,nuc_mismatch_list = mismatch_seq(genome_slice,seq_slice,genome_start,seq_start)
            mis_count = mis_count + len(genome_mismatch_list)
            all_genome_mismatch_list.extend(genome_mismatch_list)
            all_nuc_mismatch_list.extend(nuc_mismatch_list)
            ### for variation detection start
            # record each substitution individually
            for mismatch_index in xrange(len(seq_mismatch_list)):
                var_pos = genome_mismatch_list[mismatch_index]
                seq_var_pos = seq_mismatch_list[mismatch_index]
                var_seq = seq_list[seq_var_pos]
                var_type = cig_flag
                update_variation_dict(scaffold,var_pos,var_type,var_seq,read_id)
            ### for variation detection end
            seq_pos = seq_pos + match_length
            genome_pos = genome_pos + match_length
            continue
        elif cig_flag == "I":
            ### for variation detection start
            seq_start = seq_pos
            seq_end = seq_pos + int(cig_dig_list[i])
            var_pos = genome_pos
            var_seq = seq_list[seq_start:seq_end]
            var_type = cig_flag
            update_variation_dict(scaffold,var_pos,var_type,var_seq,read_id)
            ### for variation detection end
            insertion_length = int(cig_dig_list[i])
            seq_pos = seq_pos + insertion_length
            i_count = i_count + insertion_length
            insertion_list.append(genome_pos)
            insertion_length_list.append(insertion_length)
            continue
        elif cig_flag == "D":
            deletion_list.append(genome_pos)
            deletion_length = int(cig_dig_list[i])
            deletion_length_list.append(deletion_length)
            ### for variation detection start
            var_pos = genome_pos
            var_seq = str(deletion_length) #var seq for deletion is length of deletion
            var_type = cig_flag
            update_variation_dict(scaffold,var_pos,var_type,var_seq,read_id)
            ### for variation detection end
            genome_pos = genome_pos + deletion_length
            d_count = d_count + deletion_length
            continue
        elif cig_flag == "N":
            # splice junction: gather the alignment context (matches, errors,
            # indels) within sj_err_threshold bases before and after the gap
            sj_pre_error_builder_list = [] # list of cigar string before splice junctions
            sj_post_error_builder_list = [] # list of cigar string after splice junctions
            # check for errors before splice junction pre
            #########################################
            prev_cig_flag = cig_char_list[i - 1]
            prev_cig_length = int(cig_dig_list[i - 1])
            #############################################################################################################################
            # Add sj error info
            prev_total_mismatch_length = 0
            this_mismatch_length = 0 # keep track of where in the error list of cig
            all_genome_mismatch_index = len(all_genome_mismatch_list) - 1
            prev_total_cig_length = prev_cig_length
            prev_cig_index = i - 1
            this_cig_genome_pos = genome_pos
            prev_sj_flag = 0
            # walk backwards through CIGAR entries until either the threshold
            # window is exhausted or another splice junction is hit
            while prev_total_mismatch_length <= sj_err_threshold and prev_sj_flag == 0:
                this_mismatch_length = this_mismatch_length + prev_cig_length
                prev_total_mismatch_length = this_mismatch_length
                if prev_cig_flag == "M":
                    # if no mismatch
                    if len(all_genome_mismatch_list) < 1: # no mismatch from M cigar regions up to this point
                        #last_genome_mismatch = -2 * sj_err_threshold
                        if prev_total_mismatch_length <= sj_err_threshold: # no mis matches for this gene
                            sj_pre_error_builder_list.append(str(prev_cig_length) + prev_cig_flag)
                    elif all_genome_mismatch_index >= 0:
                        last_genome_mismatch = all_genome_mismatch_list[all_genome_mismatch_index]
                        last_nuc_mismatch = all_nuc_mismatch_list[all_genome_mismatch_index]
                        dist_prev_mismatch = genome_pos - last_genome_mismatch
                        m_builder_add_count = 0
                        #while dist_prev_mismatch <= this_mismatch_length and dist_prev_mismatch <= sj_err_threshold:
                        # consume recorded mismatches that fall inside this M
                        # entry and within the threshold window
                        while last_genome_mismatch >= this_cig_genome_pos - prev_cig_length and last_genome_mismatch <= this_cig_genome_pos and dist_prev_mismatch <= sj_err_threshold: # dist_prev_mismatch is still in range of sj threshold
                            sj_pre_error_builder_list.append(str(dist_prev_mismatch) + "." + last_nuc_mismatch)
                            m_builder_add_count += 1
                            all_genome_mismatch_index -= 1
                            last_genome_mismatch = all_genome_mismatch_list[all_genome_mismatch_index]
                            last_nuc_mismatch = all_nuc_mismatch_list[all_genome_mismatch_index]
                            dist_prev_mismatch = genome_pos - last_genome_mismatch
                            if all_genome_mismatch_index < 0:
                                break
                        if m_builder_add_count == 0:
                            if prev_total_mismatch_length <= sj_err_threshold: # still within in sj threshold so output
                                sj_pre_error_builder_list.append(str(prev_cig_length) + prev_cig_flag)
                    elif all_genome_mismatch_index < 0: # length of mismatch list is not 0 but index has gone past that
                        #last_genome_mismatch = -2 * sj_err_threshold
                        if prev_total_mismatch_length <= sj_err_threshold: # still within in sj threshold so output
                            sj_pre_error_builder_list.append(str(prev_cig_length) + prev_cig_flag)
                    else:
                        print("Error with all_genome_mismatch_index")
                        print(all_genome_mismatch_list)
                        print(all_genome_mismatch_index)
                        sys.exit()
                elif prev_cig_flag != "N":
                    # non-M, non-N entry (S/H/I/D): record it verbatim
                    prev_cig_flag = cig_char_list[prev_cig_index]
                    prev_cig_length = int(cig_dig_list[prev_cig_index])
                    sj_pre_error_builder_list.append(str(prev_cig_length) + prev_cig_flag)
                elif prev_cig_flag == "N":
                    # another splice junction ends the backwards walk
                    prev_sj_flag = 1
                prev_cig_index -= 1
                if prev_cig_index < 0 :
                    break
                this_cig_genome_pos = this_cig_genome_pos - prev_cig_length
                prev_cig_flag = cig_char_list[prev_cig_index]
                prev_cig_length = int(cig_dig_list[prev_cig_index])
            if len(sj_pre_error_builder_list) == 0: # in case no errors are found
                sj_pre_error_builder_list.append(no_mismatch_flag)
            # end of adding sj error info prev
            #############################################################################################################################
            #########################################
            # not related to sj error should probably move this up or down code of this block
            #########################################
            intron_length = int(cig_dig_list[i])
            genome_pos = genome_pos + intron_length
            #########################################
            #check for errors after splice junction post
            #########################################
            next_cig_flag = cig_char_list[i + 1]
            next_cig_length = int(cig_dig_list[i + 1])
            #############################################################################################################################
            next_total_mismatch_length = 0
            this_mismatch_length = 0 # keep track of where in the error list of cig
            all_genome_mismatch_index = len(all_genome_mismatch_list) - 1
            next_total_cig_length = next_cig_length
            next_cig_index = i + 1
            this_next_seq_pos = seq_pos
            this_genome_pos = genome_pos
            #genome_mismatch_index = 0
            next_sj_flag = 0
            # walk forwards through CIGAR entries until the threshold window
            # is exhausted or another splice junction is hit
            while next_total_mismatch_length <= sj_err_threshold and next_sj_flag == 0:
                this_mismatch_length = this_mismatch_length + next_cig_length
                next_total_mismatch_length = this_mismatch_length
                if next_cig_flag == "M":
                    # mismatches downstream have not been scanned yet, so this
                    # M entry is re-sliced and re-checked here
                    match_length = next_cig_length
                    seq_start = this_next_seq_pos
                    seq_end = this_next_seq_pos + match_length
                    genome_start = this_genome_pos
                    genome_end = this_genome_pos + match_length
                    seq_slice = seq_list[seq_start:seq_end]
                    genome_slice = fasta_dict[scaffold][genome_start:genome_end]
                    #######################################################
                    #print(str(match_length)+next_cig_flag)
                    #print(seq_start)
                    #print(seq_end)
                    ######################################################
                    genome_mismatch_list, seq_mismatch_list, nuc_mismatch_list = mismatch_seq(genome_slice, seq_slice, genome_start, seq_start)
                    genome_mismatch_index = 0
                    # if no mismatch
                    if len(genome_mismatch_list) < 1: # no mismatch in this cig entry
                        #dist_next_mismatch = 2 * sj_err_threshold
                        if next_total_mismatch_length <= sj_err_threshold:
                            sj_post_error_builder_list.append(str(next_cig_length) + next_cig_flag)
                    elif genome_mismatch_index <= len(genome_mismatch_list):
                        nuc_next_mismatch = nuc_mismatch_list[0]
                        #dist_next_mismatch = genome_mismatch_list[0] - this_genome_pos
                        dist_next_mismatch = genome_mismatch_list[0] - genome_pos
                        m_builder_add_count = 0
                        while dist_next_mismatch <= this_mismatch_length and dist_next_mismatch < sj_err_threshold:
                            next_genome_mismatch = genome_mismatch_list[genome_mismatch_index]
                            next_nuc_mismatch = nuc_mismatch_list[genome_mismatch_index]
                            #dist_next_mismatch = next_genome_mismatch - this_genome_pos
                            dist_next_mismatch = next_genome_mismatch - genome_pos
                            if dist_next_mismatch <= sj_err_threshold: # this mismatch goes beyond this M length
                                sj_post_error_builder_list.append(str(dist_next_mismatch) + "." + next_nuc_mismatch)
                                m_builder_add_count += 1
                            genome_mismatch_index += 1
                            if genome_mismatch_index >= len(genome_mismatch_list):
                                break
                        if m_builder_add_count == 0:
                            if next_total_mismatch_length <= sj_err_threshold:
                                sj_post_error_builder_list.append(str(next_cig_length) + next_cig_flag)
                    else:
                        print("Error with genome_mismatch_index")
                        sys.exit()
                elif next_cig_flag != "N":
                    # non-M, non-N entry (S/H/I/D): record it verbatim
                    next_cig_flag = cig_char_list[next_cig_index]
                    next_cig_length = int(cig_dig_list[next_cig_index])
                    sj_post_error_builder_list.append(str(next_cig_length) + next_cig_flag)
                elif next_cig_flag == "N":
                    # another splice junction ends the forwards walk
                    next_sj_flag = 1
                if next_cig_flag != "D": # increase this seq pos only if not a deletion
                    this_next_seq_pos = this_next_seq_pos + next_cig_length
                if next_cig_flag != "I":
                    this_genome_pos = this_genome_pos + next_cig_length
                next_cig_index += 1
                if next_cig_index >= len(cig_char_list):
                    break
                next_cig_flag = cig_char_list[next_cig_index]
                next_cig_length = int(cig_dig_list[next_cig_index])
            if len(sj_post_error_builder_list) == 0: # in case no errors are found
                sj_post_error_builder_list.append(no_mismatch_flag)
            # end of adding sj error info post
            #############################################################################################################################
            #############################################################################################################################
            # pre list was built walking backwards, so reverse it before joining
            sj_post_error_builder_line = "_".join(sj_post_error_builder_list)
            sj_pre_error_builder_list_reverse = list(reversed(sj_pre_error_builder_list))
            sj_pre_error_builder_line = "_".join(sj_pre_error_builder_list_reverse)
            sj_pre_error_list.append(sj_pre_error_builder_line)
            sj_post_error_list.append(sj_post_error_builder_line)
            #########################################
            continue
        else:
            match_length = int(cig_dig_list[i])
            print("Error with cigar flag")
            print(str(match_length) + cig_flag)
            print(cig_dig_list)
            print(cig_char_list)
            sys.exit()
    #print("calc_error_rate")
    #print(cigar)
    #blah = ",".join([str(h_count),str(s_count),str(i_count),str(d_count),str(mis_count)])
    #print(blah)
    #print(cig_dig_list)
    #print(cig_char_list)
    #sys.exit()
    return h_count,s_count,i_count,d_count,mis_count,nomatch_dict,sj_pre_error_list,sj_post_error_list
####################################################################################################
#get variation information without calc_error_rate
#records variants only; does not compute mapping error-rate statistics
def calc_variation(start_pos,cigar,seq_list,scaffold,read_id):
    """Walk one SAM alignment and record its variants only.

    A lighter-weight companion to calc_error_rate(): the same CIGAR walk and
    update_variation_dict() calls, but without mismatch/indel statistics or
    splice-junction error strings.  Returns None; called purely for its side
    effects on the module-level variation_dict / var_coverage_dict.

    Parameters:
        start_pos: 1-based leftmost mapping position from the SAM line.
        cigar: CIGAR string for the alignment.
        seq_list: the read sequence (indexable/sliceable by position).
        scaffold: reference scaffold name; key into module-level fasta_dict.
        read_id: read identifier, forwarded to update_variation_dict().
    """
    [cig_dig_list,cig_char_list] = cigar_list(cigar)
    # counters kept for parity with calc_error_rate; only h_count/s_count/
    # d_count are actually updated below, and none are returned
    h_count = 0
    s_count = 0
    i_count = 0
    d_count = 0
    mis_count = 0
    #walk through both the query seq and the mapped seq
    genome_pos = start_pos - 1 #adjust for 1 base to 0 base coordinates
    seq_pos = 0
    for i in xrange(len(cig_dig_list)):
        cig_flag = cig_char_list[i]
        #print(seq_pos)
        if cig_flag == "H":
            h_count = h_count + int(cig_dig_list[i])
            ### for variation detection start
            # NOTE(review): var_pos uses the cumulative hard-clip total, not
            # just this entry's length -- confirm that is intended
            var_pos = genome_pos - h_count
            var_seq = "N" * h_count
            var_type = cig_flag
            update_variation_dict(scaffold,var_pos,var_type,var_seq,read_id)
            ### for variation detection end
            continue
        elif cig_flag == "S":
            s_count = s_count + int(cig_dig_list[i])
            ### for variation detection start
            seq_start = seq_pos
            seq_end = seq_pos + int(cig_dig_list[i])
            var_pos = genome_pos - s_count
            var_seq = seq_list[seq_start:seq_end]
            var_type = cig_flag
            ###### change made 2019/11/19
            #correct for soft clips at end of mapping
            num_cig_entities = len(cig_dig_list)
            if i == num_cig_entities - 1:
                var_pos = genome_pos + 1 - s_count
            ###### change made 2019/11/19
            update_variation_dict(scaffold,var_pos,var_type,var_seq,read_id)
            ### for variation detection end
            #adjust seq position to account for missing soft mask in genome coord
            seq_pos = seq_pos + int(cig_dig_list[i])
            continue
        elif cig_flag == "M":
            match_length = int(cig_dig_list[i])
            seq_start = seq_pos
            seq_end = seq_pos + match_length
            genome_start = genome_pos
            genome_end = genome_pos + match_length
            #seq_slice = seq_list[seq_start:seq_end+1]
            #genome_slice = fasta_dict[scaffold][genome_start:genome_end+1]
            seq_slice = seq_list[seq_start:seq_end]
            genome_slice = fasta_dict[scaffold][genome_start:genome_end]
            seq_diff = seq_end - seq_start    # unused; kept for parity with original
            gen_diff = genome_end - genome_start    # unused; kept for parity with original
            ######################################################
            #print(str(match_length)+cig_flag)
            ######################################################
            #get number and location of mis matches
            genome_mismatch_list,seq_mismatch_list,nuc_mismatch_list = mismatch_seq(genome_slice,seq_slice,genome_start,seq_start)
            ### for variation detection start
            # record each substitution individually
            for mismatch_index in xrange(len(seq_mismatch_list)):
                var_pos = genome_mismatch_list[mismatch_index]
                seq_var_pos = seq_mismatch_list[mismatch_index]
                var_seq = seq_list[seq_var_pos]
                var_type = cig_flag
                update_variation_dict(scaffold,var_pos,var_type,var_seq,read_id)
            ### for variation detection end
            seq_pos = seq_pos + match_length
            genome_pos = genome_pos + match_length
            continue
        elif cig_flag == "I":
            ### for variation detection start
            seq_start = seq_pos
            seq_end = seq_pos + int(cig_dig_list[i])
            var_pos = genome_pos
            var_seq = seq_list[seq_start:seq_end]
            var_type = cig_flag
            update_variation_dict(scaffold,var_pos,var_type,var_seq,read_id)
            ### for variation detection end
            insertion_length = int(cig_dig_list[i])
            seq_pos = seq_pos + insertion_length
            continue
        elif cig_flag == "D":
            deletion_length = int(cig_dig_list[i])
            ### for variation detection start
            var_pos = genome_pos
            var_seq = str(deletion_length) #var seq for deletion is length of deletion
            var_type = cig_flag
            update_variation_dict(scaffold,var_pos,var_type,var_seq,read_id)
            ### for variation detection end
            genome_pos = genome_pos + deletion_length
            d_count = d_count + deletion_length
            continue
        elif cig_flag == "N":
            #####################################################################################################################
            #########################################
            # not related to sj error should probably move this up or down code of this block
            #########################################
            # intron: advance the genome coordinate only
            intron_length = int(cig_dig_list[i])
            genome_pos = genome_pos + intron_length
            #########################################
            #############################################################################################################################
            continue
        else:
            match_length = int(cig_dig_list[i])
            print("Error with cigar flag")
            print(str(match_length) + cig_flag)
            print(cig_dig_list)
            print(cig_char_list)
            sys.exit()
#####################################################################################################
## New calc error for low mem mode
#used to calculate error rate of mapping
def calc_error_rate_lowmem(start_pos,cigar,seq_list,scaffold,read_id):
    """Low-memory variant of calc_error_rate().

    Identical CIGAR walk and return values, but skips every
    update_variation_dict() call, so the large module-level variation
    dictionaries are not populated.

    Parameters:
        start_pos: 1-based leftmost mapping position from the SAM line.
        cigar: CIGAR string for the alignment.
        seq_list: the read sequence (indexable/sliceable by position).
        scaffold: reference scaffold name; key into module-level fasta_dict.
        read_id: read identifier (accepted for signature parity; unused here).

    Returns:
        h_count, s_count, i_count, d_count, mis_count, nomatch_dict,
        sj_pre_error_list, sj_post_error_list

    Uses module-level globals: fasta_dict, sj_err_threshold, no_mismatch_flag.
    """
    [cig_dig_list,cig_char_list] = cigar_list(cigar)
    # running per-operator totals
    h_count = 0    # hard-clipped bases
    s_count = 0    # soft-clipped bases
    i_count = 0    # inserted bases
    d_count = 0    # deleted bases
    mis_count = 0  # mismatched bases within M regions
    all_genome_mismatch_list = []
    all_nuc_mismatch_list = [] # this shows the nuc mismatch for the all_genome_mismatch_list
    insertion_list = []
    insertion_length_list = []
    deletion_list = []
    deletion_length_list = []
    sj_pre_error_list = []
    sj_post_error_list = []
    nomatch_dict = {} # nomatch_dict[non match id] = list
    # nomatch_dict stores live references to the lists above, so entries
    # appended during the walk below are visible through it as well
    nomatch_dict['mismatch_list'] = all_genome_mismatch_list
    nomatch_dict['insertion_list'] = insertion_list
    nomatch_dict['insertion_length_list'] = insertion_length_list
    nomatch_dict['deletion_list'] = deletion_list
    nomatch_dict['deletion_length_list'] = deletion_length_list
    #walk through both the query seq and the mapped seq
    genome_pos = start_pos - 1 #adjust for 1 base to 0 base coordinates
    seq_pos = 0
    for i in xrange(len(cig_dig_list)):
        cig_flag = cig_char_list[i]
        #print(seq_pos)
        if cig_flag == "H":
            h_count = h_count + int(cig_dig_list[i])
            continue
        elif cig_flag == "S":
            s_count = s_count + int(cig_dig_list[i])
            #adjust seq position to account for missing soft mask in genome coord
            seq_pos = seq_pos + int(cig_dig_list[i])
            continue
        elif cig_flag == "M":
            match_length = int(cig_dig_list[i])
            seq_start = seq_pos
            seq_end = seq_pos + match_length
            genome_start = genome_pos
            genome_end = genome_pos + match_length
            seq_slice = seq_list[seq_start:seq_end]
            genome_slice = fasta_dict[scaffold][genome_start:genome_end]
            seq_diff = seq_end - seq_start    # unused; kept for parity with original
            gen_diff = genome_end - genome_start    # unused; kept for parity with original
            #get number and location of mis matches
            genome_mismatch_list,seq_mismatch_list,nuc_mismatch_list = mismatch_seq(genome_slice,seq_slice,genome_start,seq_start)
            mis_count = mis_count + len(genome_mismatch_list)
            all_genome_mismatch_list.extend(genome_mismatch_list)
            all_nuc_mismatch_list.extend(nuc_mismatch_list)
            seq_pos = seq_pos + match_length
            genome_pos = genome_pos + match_length
            continue
        elif cig_flag == "I":
            insertion_length = int(cig_dig_list[i])
            seq_pos = seq_pos + insertion_length
            i_count = i_count + insertion_length
            insertion_list.append(genome_pos)
            insertion_length_list.append(insertion_length)
            continue
        elif cig_flag == "D":
            deletion_list.append(genome_pos)
            deletion_length = int(cig_dig_list[i])
            deletion_length_list.append(deletion_length)
            genome_pos = genome_pos + deletion_length
            d_count = d_count + deletion_length
            continue
        elif cig_flag == "N":
            # splice junction: gather the alignment context (matches, errors,
            # indels) within sj_err_threshold bases before and after the gap
            sj_pre_error_builder_list = [] # list of cigar string before splice junctions
            sj_post_error_builder_list = [] # list of cigar string after splice junctions
            # check for errors before splice junction pre
            #########################################
            prev_cig_flag = cig_char_list[i - 1]
            prev_cig_length = int(cig_dig_list[i - 1])
            #############################################################################################################################
            # Add sj error info
            prev_total_mismatch_length = 0
            this_mismatch_length = 0 # keep track of where in the error list of cig
            all_genome_mismatch_index = len(all_genome_mismatch_list) - 1
            prev_total_cig_length = prev_cig_length
            prev_cig_index = i - 1
            this_cig_genome_pos = genome_pos
            prev_sj_flag = 0
            # walk backwards through CIGAR entries until either the threshold
            # window is exhausted or another splice junction is hit
            while prev_total_mismatch_length <= sj_err_threshold and prev_sj_flag == 0:
                this_mismatch_length = this_mismatch_length + prev_cig_length
                prev_total_mismatch_length = this_mismatch_length
                if prev_cig_flag == "M":
                    # if no mismatch
                    if len(all_genome_mismatch_list) < 1: # no mismatch from M cigar regions up to this point
                        #last_genome_mismatch = -2 * sj_err_threshold
                        if prev_total_mismatch_length <= sj_err_threshold: # no mis matches for this gene
                            sj_pre_error_builder_list.append(str(prev_cig_length) + prev_cig_flag)
                    elif all_genome_mismatch_index >= 0:
                        last_genome_mismatch = all_genome_mismatch_list[all_genome_mismatch_index]
                        last_nuc_mismatch = all_nuc_mismatch_list[all_genome_mismatch_index]
                        dist_prev_mismatch = genome_pos - last_genome_mismatch
                        m_builder_add_count = 0
                        #while dist_prev_mismatch <= this_mismatch_length and dist_prev_mismatch <= sj_err_threshold:
                        # consume recorded mismatches that fall inside this M
                        # entry and within the threshold window
                        while last_genome_mismatch >= this_cig_genome_pos - prev_cig_length and last_genome_mismatch <= this_cig_genome_pos and dist_prev_mismatch <= sj_err_threshold: # dist_prev_mismatch is still in range of sj threshold
                            sj_pre_error_builder_list.append(str(dist_prev_mismatch) + "." + last_nuc_mismatch)
                            m_builder_add_count += 1
                            all_genome_mismatch_index -= 1
                            last_genome_mismatch = all_genome_mismatch_list[all_genome_mismatch_index]
                            last_nuc_mismatch = all_nuc_mismatch_list[all_genome_mismatch_index]
                            dist_prev_mismatch = genome_pos - last_genome_mismatch
                            if all_genome_mismatch_index < 0:
                                break
                        if m_builder_add_count == 0:
                            if prev_total_mismatch_length <= sj_err_threshold: # still within in sj threshold so output
                                sj_pre_error_builder_list.append(str(prev_cig_length) + prev_cig_flag)
                    elif all_genome_mismatch_index < 0: # length of mismatch list is not 0 but index has gone past that
                        #last_genome_mismatch = -2 * sj_err_threshold
                        if prev_total_mismatch_length <= sj_err_threshold: # still within in sj threshold so output
                            sj_pre_error_builder_list.append(str(prev_cig_length) + prev_cig_flag)
                    else:
                        print("Error with all_genome_mismatch_index")
                        print(all_genome_mismatch_list)
                        print(all_genome_mismatch_index)
                        sys.exit()
                elif prev_cig_flag != "N":
                    # non-M, non-N entry (S/H/I/D): record it verbatim
                    prev_cig_flag = cig_char_list[prev_cig_index]
                    prev_cig_length = int(cig_dig_list[prev_cig_index])
                    sj_pre_error_builder_list.append(str(prev_cig_length) + prev_cig_flag)
                elif prev_cig_flag == "N":
                    # another splice junction ends the backwards walk
                    prev_sj_flag = 1
                prev_cig_index -= 1
                if prev_cig_index < 0 :
                    break
                this_cig_genome_pos = this_cig_genome_pos - prev_cig_length
                prev_cig_flag = cig_char_list[prev_cig_index]
                prev_cig_length = int(cig_dig_list[prev_cig_index])
            if len(sj_pre_error_builder_list) == 0: # in case no errors are found
                sj_pre_error_builder_list.append(no_mismatch_flag)
            # end of adding sj error info prev
            #############################################################################################################################
            #########################################
            # not related to sj error should probably move this up or down code of this block
            #########################################
            intron_length = int(cig_dig_list[i])
            genome_pos = genome_pos + intron_length
            #########################################
            #check for errors after splice junction post
            #########################################
            next_cig_flag = cig_char_list[i + 1]
            next_cig_length = int(cig_dig_list[i + 1])
            #############################################################################################################################
            next_total_mismatch_length = 0
            this_mismatch_length = 0 # keep track of where in the error list of cig
            all_genome_mismatch_index = len(all_genome_mismatch_list) - 1
            next_total_cig_length = next_cig_length
            next_cig_index = i + 1
            this_next_seq_pos = seq_pos
            this_genome_pos = genome_pos
            #genome_mismatch_index = 0
            next_sj_flag = 0
            # walk forwards through CIGAR entries until the threshold window
            # is exhausted or another splice junction is hit
            while next_total_mismatch_length <= sj_err_threshold and next_sj_flag == 0:
                this_mismatch_length = this_mismatch_length + next_cig_length
                next_total_mismatch_length = this_mismatch_length
                if next_cig_flag == "M":
                    # mismatches downstream have not been scanned yet, so this
                    # M entry is re-sliced and re-checked here
                    match_length = next_cig_length
                    seq_start = this_next_seq_pos
                    seq_end = this_next_seq_pos + match_length
                    genome_start = this_genome_pos
                    genome_end = this_genome_pos + match_length
                    seq_slice = seq_list[seq_start:seq_end]
                    genome_slice = fasta_dict[scaffold][genome_start:genome_end]
                    #######################################################
                    #print(str(match_length)+next_cig_flag)
                    #print(seq_start)
                    #print(seq_end)
                    ######################################################
                    genome_mismatch_list, seq_mismatch_list, nuc_mismatch_list = mismatch_seq(genome_slice, seq_slice, genome_start, seq_start)
                    genome_mismatch_index = 0
                    # if no mismatch
                    if len(genome_mismatch_list) < 1: # no mismatch in this cig entry
                        #dist_next_mismatch = 2 * sj_err_threshold
                        if next_total_mismatch_length <= sj_err_threshold:
                            sj_post_error_builder_list.append(str(next_cig_length) + next_cig_flag)
                    elif genome_mismatch_index <= len(genome_mismatch_list):
                        nuc_next_mismatch = nuc_mismatch_list[0]
                        #dist_next_mismatch = genome_mismatch_list[0] - this_genome_pos
                        dist_next_mismatch = genome_mismatch_list[0] - genome_pos
                        m_builder_add_count = 0
                        while dist_next_mismatch <= this_mismatch_length and dist_next_mismatch < sj_err_threshold:
                            next_genome_mismatch = genome_mismatch_list[genome_mismatch_index]
                            next_nuc_mismatch = nuc_mismatch_list[genome_mismatch_index]
                            #dist_next_mismatch = next_genome_mismatch - this_genome_pos
                            dist_next_mismatch = next_genome_mismatch - genome_pos
                            if dist_next_mismatch <= sj_err_threshold: # this mismatch goes beyond this M length
                                sj_post_error_builder_list.append(str(dist_next_mismatch) + "." + next_nuc_mismatch)
                                m_builder_add_count += 1
                            genome_mismatch_index += 1
                            if genome_mismatch_index >= len(genome_mismatch_list):
                                break
                        if m_builder_add_count == 0:
                            if next_total_mismatch_length <= sj_err_threshold:
                                sj_post_error_builder_list.append(str(next_cig_length) + next_cig_flag)
                    else:
                        print("Error with genome_mismatch_index")
                        sys.exit()
                elif next_cig_flag != "N":
                    # non-M, non-N entry (S/H/I/D): record it verbatim
                    next_cig_flag = cig_char_list[next_cig_index]
                    next_cig_length = int(cig_dig_list[next_cig_index])
                    sj_post_error_builder_list.append(str(next_cig_length) + next_cig_flag)
                elif next_cig_flag == "N":
                    # another splice junction ends the forwards walk
                    next_sj_flag = 1
                if next_cig_flag != "D": # increase this seq pos only if not a deletion
                    this_next_seq_pos = this_next_seq_pos + next_cig_length
                if next_cig_flag != "I":
                    this_genome_pos = this_genome_pos + next_cig_length
                next_cig_index += 1
                if next_cig_index >= len(cig_char_list):
                    break
                next_cig_flag = cig_char_list[next_cig_index]
                next_cig_length = int(cig_dig_list[next_cig_index])
            if len(sj_post_error_builder_list) == 0: # in case no errors are found
                sj_post_error_builder_list.append(no_mismatch_flag)
            # end of adding sj error info post
            #############################################################################################################################
            #############################################################################################################################
            # pre list was built walking backwards, so reverse it before joining
            sj_post_error_builder_line = "_".join(sj_post_error_builder_list)
            sj_pre_error_builder_list_reverse = list(reversed(sj_pre_error_builder_list))
            sj_pre_error_builder_line = "_".join(sj_pre_error_builder_list_reverse)
            sj_pre_error_list.append(sj_pre_error_builder_line)
            sj_post_error_list.append(sj_post_error_builder_line)
            #########################################
            continue
        else:
            match_length = int(cig_dig_list[i])
            print("Error with cigar flag")
            print(str(match_length) + cig_flag)
            print(cig_dig_list)
            print(cig_char_list)
            sys.exit()
    return h_count,s_count,i_count,d_count,mis_count,nomatch_dict,sj_pre_error_list,sj_post_error_list
####################################################################################################
class Transcript:
    """
    One aligned long read / transcript model built from a single SAM record.

    Attributes are filled in incrementally by the add_* methods as the SAM
    line is parsed: alignment fields, exon coordinates, CIGAR-derived error
    counts, downstream-sequence (poly-A) stats, and a splice-junction hash
    used to speed up transcript-to-transcript matching.
    """
    def __init__(self, cluster_id):
        self.cluster_id = cluster_id
        self.trans_id = cluster_id #for flexible calling
        self.sam_flag = "none"
        self.scaff_name = "none"      # scaffold/chromosome name from the SAM line
        self.start_pos = -1           # 1-based alignment start
        self.cigar = "none"
        self.read_seq = "none"
        self.seq_length = 0           # full read length (hard clips added later)
        self.seq_list = []
        self.map_seq_length = 0
        self.h_count = "none"         # hard clip count (int after add_mismatch)
        self.s_count = "none"         # soft clip count
        self.i_count = "none"         # insertion count
        self.d_count = "none"         # deletion count
        self.mis_count = "none"       # mismatch count
        self.nomatch_dict = {}
        self.end_pos = 0
        self.exon_start_list = []
        self.exon_end_list = []
        self.num_exons = 0
        self.strand = ""
        self.downstream_seq = []
        self.dseq_length = 0
        self.a_count = 0
        self.n_count = 0
        self.a_percent = 0.0
        self.n_percent = 0.0
        self.percent_cov = 0.0
        self.percent_identity = 0.0
        self.sj_hash = "none" # use this to speed up matching
    def add_sam_info(self,sam_flag,scaff_name,start_pos,cigar,read_seq,seq_list):
        """Record the core SAM fields and derive the strand from the SAM flag."""
        self.sam_flag = sam_flag
        self.scaff_name = scaff_name
        self.start_pos = int(start_pos)
        self.cigar = cigar
        self.read_seq = read_seq
        self.seq_list = seq_list
        #need to add hard clipped seq later if there is any
        self.seq_length = len(read_seq)
        # sam_flag_dict is a module-level lookup: SAM flag -> strand label
        if sam_flag_dict[sam_flag] == "forward_strand":
            self.strand = "+"
        elif sam_flag_dict[sam_flag] == "reverse_strand":
            self.strand = "-"
        else:
            print("Error with interpretting SAM flag")
            sys.exit()
    def add_map_seq_length(self,map_seq_length):
        """Record the aligned (mapped) sequence length."""
        self.map_seq_length = map_seq_length
    def add_exon_coords(self,end_pos,exon_start_list,exon_end_list):
        """Record exon start/end coordinate lists and the alignment end position."""
        self.exon_start_list = exon_start_list
        self.exon_end_list = exon_end_list
        self.end_pos = end_pos
        self.num_exons = len(exon_start_list)
    def make_sj_hash_string(self):
        """
        Build a string splice-junction hash: "<exon ends minus last>;<exon starts minus first>".
        Added 2020/07/26 to speed up transcript matching.
        """
        exon_start_string_list = []
        exon_end_string_list = []
        for exon_index in range(len(self.exon_start_list)):
            exon_start_string_list.append(str(self.exon_start_list[exon_index]))
            exon_end_string_list.append(str(self.exon_end_list[exon_index]))
        # junction right edges = all exon starts except the first,
        # junction left edges = all exon ends except the last
        sj_right_list = exon_start_string_list
        sj_right_list.pop(0)
        sj_left_list = exon_end_string_list
        sj_left_list.pop(-1)
        sj_left_line = ",".join(sj_left_list)
        sj_right_line = ",".join(sj_right_list)
        self.sj_hash = ";".join([sj_left_line,sj_right_line])
    def make_sj_hash_int(self):
        """
        Build an integer splice-junction hash (sum of products of adjacent
        junction coordinates). NOTE(review): different junction sets can in
        principle collide to the same integer; kept as in the original design.
        """
        self.sj_hash = 0
        for exon_index in range(len(self.exon_start_list)-1):
            self.sj_hash = self.sj_hash + (self.exon_start_list[exon_index+1] * self.exon_end_list[exon_index])
    def add_mismatch(self,h_count,s_count,i_count,d_count,mis_count,nomatch_dict,sj_pre_error_list,sj_post_error_list):
        """Record CIGAR-derived error counts and per-junction error strings."""
        self.h_count = int(h_count)
        self.s_count = int(s_count)
        self.i_count = int(i_count)
        self.d_count = int(d_count)
        self.mis_count = int(mis_count)
        self.nomatch_dict = nomatch_dict
        self.sj_pre_error_list = sj_pre_error_list
        self.sj_post_error_list = sj_post_error_list
        #hard clipped seq is missing in given read seq so need to add
        # bug fix: use the int-converted count so a string h_count argument
        # does not raise TypeError on the addition
        self.seq_length = self.seq_length + self.h_count
    def calc_coverage(self):
        """Return percent of the read covered by the alignment (clipped bases excluded)."""
        percent_cov = float(self.seq_length - self.h_count - self.s_count) / float(self.seq_length)
        percent_cov = percent_cov * 100.0
        self.percent_cov = percent_cov
        return percent_cov
    def calc_identity(self):
        """Return percent identity; the denominator depends on the module-level ident_calc_method."""
        map_seq_length = self.seq_length - self.h_count - self.s_count
        if ident_calc_method == "ident_cov":
            # identity relative to the whole read, clips counted as errors
            nonmatch_count = self.h_count + self.s_count + self.i_count + self.d_count + self.mis_count
            percent_identity = float(self.seq_length - nonmatch_count) / float(self.seq_length)
            percent_identity = percent_identity * 100.0
        elif ident_calc_method == "ident_map":
            # identity relative to the mapped portion only
            nonmatch_count = self.i_count + self.d_count + self.mis_count
            percent_identity = float(map_seq_length - nonmatch_count) / float(map_seq_length)
            percent_identity = percent_identity * 100.0
        else:
            # robustness fix: previously fell through to an UnboundLocalError
            print("Error with ident_calc_method")
            sys.exit()
        self.percent_identity = percent_identity
        return percent_identity
    def make_error_line(self):
        """Return the error counts as an "H;S;I;D;mismatch" semicolon-joined string."""
        error_list = []
        error_list.append(str(self.h_count))
        error_list.append(str(self.s_count))
        error_list.append(str(self.i_count))
        error_list.append(str(self.d_count))
        error_list.append(str(self.mis_count))
        error_line = ";".join(error_list)
        return error_line
    def make_exon_start_end_lines(self):
        """Return (exon starts, exon ends) as comma-joined coordinate strings."""
        exon_start_string_list = []
        exon_end_string_list = []
        for i in range(len(self.exon_start_list)):
            exon_start_string_list.append(str(self.exon_start_list[i]))
            exon_end_string_list.append(str(self.exon_end_list[i]))
        exon_start_string_line = ",".join(exon_start_string_list)
        exon_end_string_line = ",".join(exon_end_string_list)
        return exon_start_string_line,exon_end_string_line
    def add_polya_info(self,downstream_seq,dseq_length,a_count,n_count,a_percent,n_percent):
        """Record stats for the genomic sequence downstream of the alignment (poly-A / internal priming check)."""
        self.downstream_seq = downstream_seq
        self.dseq_length = dseq_length
        self.a_count = a_count
        self.n_count = n_count
        self.a_percent = a_percent
        self.n_percent = n_percent
    def format_bed_line(self,final_trans_id):
        """
        Return a 12-column BED line for this transcript, labelled
        "<final_trans_id>;<trans_id>". Starts/ends are shifted by -1 as in the
        original implementation (NOTE(review): BED chromEnd is normally
        exclusive; behavior intentionally unchanged).
        """
        bed_list = []
        bed_list.append(str(self.scaff_name))
        bed_list.append(str(self.start_pos-1))
        bed_list.append(str(self.end_pos-1))
        id_line = ";".join([final_trans_id,self.trans_id])
        bed_list.append(str(id_line))
        bed_list.append("40")                 # fixed score column
        bed_list.append(self.strand)
        bed_list.append(str(self.start_pos-1)) # thickStart
        bed_list.append(str(self.end_pos-1))   # thickEnd
        bed_list.append("255,0,0")             # itemRgb
        bed_list.append(str(self.num_exons))
        relative_exon_start_list = []
        exon_length_list = []
        for i in range(self.num_exons):
            exon_start = self.exon_start_list[i]
            exon_end = self.exon_end_list[i]
            exon_length = exon_end - exon_start
            relative_exon_start = exon_start - self.start_pos
            relative_exon_start_list.append(str(relative_exon_start))
            exon_length_list.append(str(exon_length))
        relative_exon_start_line = ",".join(relative_exon_start_list)
        exon_length_line = ",".join(exon_length_list)
        bed_list.append(exon_length_line)
        bed_list.append(relative_exon_start_line)
        bed_line = "\t".join(bed_list)
        return bed_line
####################################################################################################
class Merged:
    """
    A collapsed transcript model: the result of merging one or more Transcript
    objects that were judged to be the same transcript. Holds the collapsed
    exon coordinates, per-boundary wobble, splice-junction error summaries,
    and formatting helpers for BED and report output.
    """
    def __init__(self, trans_id):
        self.trans_id = trans_id
        #same as collapse start and end list but used for flexibility in calling
        self.exon_start_list = []
        self.exon_end_list = []
        self.scaff_name = "none"
        self.merged_trans_dict = {} # merged_trans_dict[trans id] = trans obj
        self.trans_list = []
        self.trans_obj_list = []
        self.strand = "none"
        self.num_trans = 0          # number of supporting transcripts
        self.collapse_start_list = []
        self.collapse_end_list = []
        self.start_wobble_list = []
        self.end_wobble_list = []
        self.start_pos = 0
        self.end_pos = 0
        self.num_exons = 0          # max exon count across supporting reads
    def add_merged_trans(self,trans_obj):
        """Register one supporting Transcript; all supporters must share a strand."""
        merged_trans_id = trans_obj.cluster_id
        #self.trans_list.append(merged_trans_id)
        self.trans_obj_list.append(trans_obj)
        self.merged_trans_dict[merged_trans_id] = trans_obj
        merged_trans_id_list = self.merged_trans_dict.keys()
        self.trans_list = self.merged_trans_dict.keys()
        self.num_trans = len(merged_trans_id_list)
        # keep the maximum exon count seen across supporting reads
        if self.num_exons < trans_obj.num_exons:
            self.num_exons = trans_obj.num_exons
        self.scaff_name = trans_obj.scaff_name
        if self.strand == "none":
            self.strand = trans_obj.strand
        elif self.strand != trans_obj.strand:
            print("Error with merged trans not on the same strand")
            sys.exit()
    def add_merge_info(self,collapse_start_list,collapse_end_list,start_wobble_list,end_wobble_list,collapse_sj_start_err_list,collapse_sj_end_err_list,collapse_start_error_nuc_list,collapse_end_error_nuc_list ):
        """Record the collapsed exon coordinates, wobble ranges, and splice-junction error summaries."""
        self.collapse_start_list = collapse_start_list
        self.collapse_end_list = collapse_end_list
        self.start_wobble_list = start_wobble_list
        self.end_wobble_list = end_wobble_list
        # exon_* aliases kept for flexible calling alongside Transcript objects
        self.exon_start_list = collapse_start_list
        self.exon_end_list = collapse_end_list
        self.start_pos = collapse_start_list[0]
        self.end_pos = collapse_end_list[-1]
        self.collapse_sj_start_err_list = collapse_sj_start_err_list
        self.collapse_sj_end_err_list = collapse_sj_end_err_list
        self.collapse_start_error_nuc_list = collapse_start_error_nuc_list
        self.collapse_end_error_nuc_list = collapse_end_error_nuc_list
    def format_bed_line(self):
        """
        Return a 12-column BED line for the collapsed model, labelled
        "<gene id>;<trans id>". Coordinates shifted by -1 as in the original.
        Exits on negative exon lengths / relative starts (corrupt coordinates).
        """
        bed_list = []
        bed_list.append(str(self.scaff_name))
        bed_list.append(str(self.start_pos-1))
        bed_list.append(str(self.end_pos-1))
        gene_id = self.trans_id.split(".")[0]
        id_line = ";".join([gene_id,self.trans_id])
        bed_list.append(str(id_line))
        bed_list.append("40")                 # fixed score column
        bed_list.append(self.strand)
        bed_list.append(str(self.start_pos-1)) # thickStart
        bed_list.append(str(self.end_pos-1))   # thickEnd
        bed_list.append("255,0,0")             # itemRgb
        bed_list.append(str(self.num_exons))
        relative_exon_start_list = []
        exon_length_list = []
        for i in range(self.num_exons):
            exon_start = self.collapse_start_list[i]
            exon_end = self.collapse_end_list[i]
            exon_length = exon_end - exon_start
            relative_exon_start = exon_start - self.start_pos
            relative_exon_start_list.append(str(relative_exon_start))
            exon_length_list.append(str(exon_length))
            # bug fix: this sanity check previously re-tested relative_exon_start
            # instead of exon_length
            if(exon_length < 0):
                print("negative exon_length")
                print(exon_length)
                print(id_line)
                sys.exit()
            if(relative_exon_start < 0):
                print("negative relative_exon_start")
                print(relative_exon_start)
                print(id_line)
                sys.exit()
        relative_exon_start_line = ",".join(relative_exon_start_list)
        exon_length_line = ",".join(exon_length_list)
        bed_list.append(exon_length_line)
        bed_list.append(relative_exon_start_line)
        bed_line = "\t".join(bed_list)
        return bed_line
    def format_trans_report_line(self):
        """
        Return a tab-separated report line: trans id, support count, high/low
        coverage, high/low identity, start/end wobble, splice-junction error
        profiles (5'->3'), and the junction error-nucleotide summary.
        """
        trans_report_list = []
        trans_report_list.append(self.trans_id)
        trans_report_list.append(str(self.num_trans))
        # best/worst identity and coverage across supporters; -1.0 means none seen
        high_quality_percent = -1.0
        low_quality_percent = -1.0
        high_cov_percent = -1.0
        low_cov_percent = -1.0
        for merged_trans_id in self.merged_trans_dict:
            merged_trans_obj = self.merged_trans_dict[merged_trans_id]
            quality_percent = merged_trans_obj.calc_identity()
            coverage_percent = merged_trans_obj.calc_coverage()
            if high_quality_percent == -1.0:
                high_quality_percent = quality_percent
                low_quality_percent = quality_percent
                high_cov_percent = coverage_percent
                low_cov_percent = coverage_percent
            else:
                if high_quality_percent < quality_percent:
                    high_quality_percent = quality_percent
                if low_quality_percent > quality_percent:
                    low_quality_percent = quality_percent
                if high_cov_percent < coverage_percent:
                    high_cov_percent = coverage_percent
                if low_cov_percent > coverage_percent:
                    low_cov_percent = coverage_percent
        high_quality_percent_str = str(round(high_quality_percent,2))
        low_quality_percent_str = str(round(low_quality_percent,2))
        high_cov_percent_str = str(round(high_cov_percent,2))
        low_cov_percent_str = str(round(low_cov_percent,2))
        trans_report_list.append(high_cov_percent_str)
        trans_report_list.append(low_cov_percent_str)
        trans_report_list.append(high_quality_percent_str)
        trans_report_list.append(low_quality_percent_str)
        # assumes start_wobble_list and end_wobble_list have equal length — TODO confirm
        start_wobble_string_list = []
        end_wobble_string_list = []
        for i in range(len(self.start_wobble_list)):
            start_wobble_string = str(self.start_wobble_list[i])
            start_wobble_string_list.append(start_wobble_string)
            end_wobble_string = str(self.end_wobble_list[i])
            end_wobble_string_list.append(end_wobble_string)
        start_wobble_line = ",".join(start_wobble_string_list)
        end_wobble_line = ",".join(end_wobble_string_list)
        trans_report_list.append(start_wobble_line)
        trans_report_list.append(end_wobble_line)
        collapse_sj_start_err_list_str = []
        collapse_sj_end_err_list_str = []
        for i in range(len(self.collapse_sj_start_err_list)):
            collapse_sj_start_err_list_str.append(str(self.collapse_sj_start_err_list[i]))
            collapse_sj_end_err_list_str.append(str(self.collapse_sj_end_err_list[i]))
        if self.strand == "+":
            # plus-strand lists were built 3'->5'; reverse so output reads 5'->3'
            collapse_sj_start_err_list_str = list(reversed(collapse_sj_start_err_list_str))
            collapse_sj_end_err_list_str = list(reversed(collapse_sj_end_err_list_str))
        collapse_sj_start_err_list_line = ",".join(collapse_sj_start_err_list_str)
        collapse_sj_end_err_list_line = ",".join(collapse_sj_end_err_list_str)
        trans_report_list.append(collapse_sj_start_err_list_line)
        trans_report_list.append(collapse_sj_end_err_list_line)
        # bug fix: work on a copy so repeated calls do not mutate
        # self.collapse_start_error_nuc_list via the pop() calls below
        collapse_error_nuc_list = list(self.collapse_start_error_nuc_list)
        if len(collapse_error_nuc_list) > 1:
            if self.strand == "+":
                if collapse_error_nuc_list[-1] == "na":
                    collapse_error_nuc_list.pop(-1)
                    collapse_error_nuc_list = list(reversed(collapse_error_nuc_list))
                else:
                    print("Error with collapse_error_nuc_list")
                    sys.exit()
            elif self.strand == "-":
                if collapse_error_nuc_list[0] == "na":
                    collapse_error_nuc_list.pop(0)
                else:
                    print("Error with collapse_error_nuc_list")
                    sys.exit()
            else:
                print("Error with strand in format_trans_report_line")
                # bug fix: sys.exit was referenced but never called, so this
                # error path silently fell through
                sys.exit()
        collapse_error_nuc_list_line = ";".join(collapse_error_nuc_list)
        trans_report_list.append(collapse_error_nuc_list_line)
        trans_report_line = "\t".join(trans_report_list)
        return trans_report_line
#above this line are def's used for looping through sam file
####################################################################################################
####################################################################################################
####################################################################################################
#below this line are def's use for post sam pocessing
####################################################################################################
def fuzzy_match(coord1,coord2,diff_threshold): # use this to allow for fuzzy matches of splice junctions
    """
    Classify how closely two genomic coordinates agree.

    Returns a (flag, delta) tuple where flag is "perfect_match" when the
    coordinates are equal, "wobbly_match" when they differ by at most
    diff_threshold, and "no_match" otherwise; delta is the signed
    difference coord1 - coord2 (0 for a perfect match).
    """
    if coord1 == coord2:
        return "perfect_match", 0
    delta = coord1 - coord2
    if abs(delta) <= diff_threshold:
        return "wobbly_match", delta
    return "no_match", delta
####################################################################################################
## added 2020/07/26
# use this for speeding up transcript matching
def exact_match_capped(trans_obj,o_trans_obj,strand):
    """
    Fast pre-check for 5'-capped libraries: two transcripts are an
    "exact_match" when both their start and end positions agree within the
    strand-appropriate 5'/3' thresholds (module-level fiveprime_threshold /
    threeprime_threshold) and their splice-junction hashes are identical.
    Returns "exact_match" or "not_exact".
    """
    start_gap = abs(trans_obj.start_pos - o_trans_obj.start_pos)
    end_gap = abs(trans_obj.end_pos - o_trans_obj.end_pos)
    if strand == "+":
        start_limit = fiveprime_threshold
        end_limit = threeprime_threshold
    elif strand == "-":
        start_limit = threeprime_threshold
        end_limit = fiveprime_threshold
    else:
        return "not_exact"
    if start_gap <= start_limit and end_gap <= end_limit:
        if trans_obj.sj_hash == o_trans_obj.sj_hash:
            return "exact_match"
    return "not_exact"
####################################################################################################
## added 2020/07/26
# use this for speeding up transacript matching , NO CAP
def exact_match_nocap(trans_obj,o_trans_obj,strand):
    """
    Fast pre-check for uncapped libraries: only the 3' end must agree within
    the module-level threeprime_threshold (the 5' end may be degraded), and
    the splice-junction hashes must be identical.
    Returns "exact_match" or "not_exact".
    """
    if strand == "+":
        three_prime_gap = abs(trans_obj.end_pos - o_trans_obj.end_pos)
    elif strand == "-":
        three_prime_gap = abs(trans_obj.start_pos - o_trans_obj.start_pos)
    else:
        return "not_exact"
    if three_prime_gap <= threeprime_threshold:
        if trans_obj.sj_hash == o_trans_obj.sj_hash:
            return "exact_match"
    return "not_exact"
####################################################################################################
def compare_transcripts(trans_obj,o_trans_obj,fiveprime_cap_flag,strand): #use this to compare two transcripts
    """
    Compare the exon structures of two Transcript objects and classify their
    relationship.

    Parameters:
        trans_obj, o_trans_obj -- Transcript objects with exon_start_list /
            exon_end_list populated ("a" and "b" respectively).
        fiveprime_cap_flag -- "capped" or "no_cap"; capped libraries require
            identical exon counts, uncapped libraries tolerate 5' truncation.
        strand -- "+" or "-"; controls iteration direction (always 3' -> 5')
            and which boundary gets the 5'/3' thresholds.

    Returns a 9-tuple:
        (trans_comp_flag, start_match_list, start_diff_list, end_match_list,
         end_diff_list, short_trans, long_trans, min_exon_num,
         diff_num_exon_flag)
    where trans_comp_flag is one of "same_transcript",
    "same_three_prime_same_exons", "same_three_prime_diff_exons", or
    "diff_transcripts", and the match/diff lists are ordered 3' -> 5'.

    Relies on module-level thresholds exon_diff_threshold,
    fiveprime_threshold, threeprime_threshold and the fuzzy_match helper.
    """
    diff_num_exon_flag = 0
    max_exon_num = 0
    min_exon_num = 0
    #exon_diff_threshold = 10
    #fiveprime_threshold = 10
    #threeprime_threshold = 10
    a_trans_id = trans_obj.cluster_id
    b_trans_id = o_trans_obj.cluster_id
    e_start_list = trans_obj.exon_start_list
    o_e_start_list = o_trans_obj.exon_start_list
    e_end_list = trans_obj.exon_end_list
    o_e_end_list = o_trans_obj.exon_end_list
    # signed exon-count difference: positive means "a" has more exons
    if len(e_start_list) != len(o_e_start_list):
        diff_num_exon_flag = len(e_start_list) - len(o_e_start_list)
    trans_comp_flag = "none"
    short_trans = "none"
    long_trans = "none"
    if fiveprime_cap_flag == "capped" and diff_num_exon_flag != 0: # if 5prime capped then should have same number of exons
        trans_comp_flag = "diff_transcripts"
        start_match_list = []
        start_diff_list = []
        end_match_list = []
        end_diff_list = []
        short_trans = "none"
        min_exon_num = 0
    else:
        #get max and min number of exons
        if len(e_start_list) > len(o_e_start_list):
            max_exon_num = len(e_start_list)
            min_exon_num = len(o_e_start_list)
            short_trans = o_trans_obj.cluster_id
        elif len(e_start_list) < len(o_e_start_list):
            max_exon_num = len(o_e_start_list)
            min_exon_num = len(e_start_list)
            short_trans = trans_obj.cluster_id
        elif len(e_start_list) == len(o_e_start_list):
            max_exon_num = len(o_e_start_list)
            min_exon_num = len(e_start_list)
            #do this for nocap libraries
            # same exon count: decide short/long by whose 5' end is innermost
            if strand == "+":
                if e_start_list[0] < o_e_start_list[0]:
                    short_trans = o_trans_obj.cluster_id
                    long_trans = trans_obj.cluster_id
                elif e_start_list[0] > o_e_start_list[0]:
                    short_trans = trans_obj.cluster_id
                    long_trans = o_trans_obj.cluster_id
                elif e_start_list[0] == o_e_start_list[0]:
                    short_trans = "same"
                    long_trans = "same"
                else:
                    print("Error with short and long trans identification")
                    print(trans_obj.cluster_id + " " + o_trans_obj.cluster_id)
                    sys.exit()
            elif strand == "-":
                if e_end_list[0] > o_e_end_list[0]:
                    short_trans = o_trans_obj.cluster_id
                    long_trans = trans_obj.cluster_id
                elif e_end_list[0] < o_e_end_list[0]:
                    short_trans = trans_obj.cluster_id
                    long_trans = o_trans_obj.cluster_id
                elif e_end_list[0] == o_e_end_list[0]:
                    short_trans = "same"
                    long_trans = "same"
                else:
                    print("Error with short and long trans identification")
                    print(trans_obj.cluster_id + " - " + o_trans_obj.cluster_id + " strand: " + strand)
                    sys.exit()
    start_match_list = []
    start_diff_list = []
    end_match_list = []
    end_diff_list = []
    all_match_flag = 1 # 1 if all matching and 0 if at least one not matching
    # walk the min_exon_num shared exons from the 3' end toward the 5' end
    # (loop is a no-op when min_exon_num was reset to 0 above)
    for i in xrange(min_exon_num):
        if strand == "+":
            j = -1 * (i + 1) #iterate from last exon to account for possible 5' degradation for forward strand
        elif strand == "-":
            j = i # iterate from first exon for reverse strand
        e_start = e_start_list[j]
        o_e_start = o_e_start_list[j]
        e_end = e_end_list[j]
        o_e_end = o_e_end_list[j]
        # check for micro exons which do not overlap but fit in wobble range
        if e_start >= o_e_end:
            trans_comp_flag = "diff_transcripts"
            start_match_list = []
            start_diff_list = []
            end_match_list = []
            end_diff_list = []
            short_trans = "none"
            min_exon_num = 0
            continue
        if o_e_start >= e_end:
            trans_comp_flag = "diff_transcripts"
            start_match_list = []
            start_diff_list = []
            end_match_list = []
            end_diff_list = []
            short_trans = "none"
            min_exon_num = 0
            continue
        # default thresholds for internal splice junctions; terminal exons
        # get the looser 5'/3' thresholds below
        start_threshold = exon_diff_threshold
        end_threshold = exon_diff_threshold
        if strand == "+":
            if i == 0: #use three prime threshold if this is last exon
                end_threshold = threeprime_threshold
            if diff_num_exon_flag == 0 and i == min_exon_num-1: #use 5 prime threshold if this is first exon
                start_threshold = fiveprime_threshold
        elif strand == "-":
            if i == 0: #use three prime threshold if this is last exon
                start_threshold = threeprime_threshold
            if diff_num_exon_flag == 0 and i == min_exon_num-1: #use 5 prime threshold if this is first exon
                end_threshold = fiveprime_threshold
        start_match_flag,start_diff_num = fuzzy_match(e_start,o_e_start,start_threshold)
        end_match_flag,end_diff_num = fuzzy_match(e_end,o_e_end,end_threshold)
        #use this for the condition that no 5 prime cap and shorter 5 prime end
        if fiveprime_cap_flag == "no_cap" and i == min_exon_num-1: # if no 5 cap and this is last 5 prime exon
            if strand == "+":
                if end_match_flag == "no_match":
                    all_match_flag = 0
                elif start_match_flag == "no_match" and all_match_flag == 1:
                    # check that shorter transcript has shorter 5' end
                    if b_trans_id == short_trans and start_diff_num < 0 and diff_num_exon_flag != 0:
                        trans_comp_flag = "same_three_prime_diff_exons"
                    elif a_trans_id == short_trans and start_diff_num > 0 and diff_num_exon_flag != 0:
                        trans_comp_flag = "same_three_prime_diff_exons"
                    #elif short_trans == "same" and start_diff_num < 0 and diff_num_exon_flag == 0: # if short trans is same then same number of exons
                    elif start_diff_num < 0 and diff_num_exon_flag == 0: # if short trans is same then same number of exons
                        short_trans = b_trans_id
                        trans_comp_flag = "same_three_prime_same_exons"
                    #elif short_trans == "same" and start_diff_num > 0 and diff_num_exon_flag == 0: # if short trans is same then same number of exons
                    elif start_diff_num > 0 and diff_num_exon_flag == 0: # if short trans is same then same number of exons
                        short_trans = a_trans_id
                        trans_comp_flag = "same_three_prime_same_exons"
                    else:
                        all_match_flag = 0
                #################################################################################################################
                ########################## Continue here! 2017-06-13 Figure out how to differentiate shorter transcript!!!!! Need to define shorter/longer roles for trans grouping!!
                #################################################################################################################
            elif strand == "-":
                if start_match_flag == "no_match":
                    all_match_flag = 0
                elif end_match_flag == "no_match" and all_match_flag == 1:
                    # check that shorter transcript has shorter 5' end
                    if b_trans_id == short_trans and end_diff_num > 0 and diff_num_exon_flag != 0:
                        trans_comp_flag = "same_three_prime_diff_exons"
                    elif a_trans_id == short_trans and end_diff_num < 0 and diff_num_exon_flag != 0:
                        trans_comp_flag = "same_three_prime_diff_exons"
                    #elif short_trans == "same" and end_diff_num > 0 and diff_num_exon_flag == 0: # if short trans is same then same number of exons
                    elif end_diff_num > 0 and diff_num_exon_flag == 0: # if short trans is same then same number of exons
                        short_trans = b_trans_id
                        trans_comp_flag = "same_three_prime_same_exons"
                    #elif short_trans == "same" and end_diff_num < 0 and diff_num_exon_flag == 0: # if short trans is same then same number of exons
                    elif end_diff_num < 0 and diff_num_exon_flag == 0: # if short trans is same then same number of exons
                        short_trans = a_trans_id
                        trans_comp_flag = "same_three_prime_same_exons"
                    else:
                        all_match_flag = 0
        # if capped or if nocap without being the last exon use normal matching procedure
        else:
            if start_match_flag == "no_match":
                all_match_flag = 0
            if end_match_flag == "no_match":
                all_match_flag = 0
        start_match_list.append(start_match_flag)
        start_diff_list.append(start_diff_num)
        end_match_list.append(end_match_flag)
        end_diff_list.append(end_diff_num)
    #cleanup trans_comp_flag
    if trans_comp_flag == "none":
        if all_match_flag == 1:
            if diff_num_exon_flag == 0 and fiveprime_cap_flag == "capped":
                trans_comp_flag = "same_transcript"
            elif diff_num_exon_flag == 0 and fiveprime_cap_flag == "no_cap":
                trans_comp_flag = "same_three_prime_same_exons"
            elif diff_num_exon_flag != 0 and fiveprime_cap_flag == "no_cap":
                trans_comp_flag = "same_three_prime_diff_exons"
            else:
                print("Error with missing trans_comp_flag ")
                print(trans_comp_flag)
                print(diff_num_exon_flag)
                print(max_exon_num)
                print(min_exon_num)
                print(start_match_list)
                print(end_match_list)
                sys.exit()
        else:
            trans_comp_flag = "diff_transcripts"
    #Keep in mind that the lists are ordered from 3' end to 5' end
    # if trans_obj.trans_id == '1_14_c81301/2/1370' and o_trans_obj.trans_id == '1_14_c147778/1/1455':
    #     print("blah blah")
    #     print(trans_comp_flag)
    #     print(start_match_list)
    #     print(end_match_list)
    #     print(start_diff_list)
    #     print(end_diff_list)
    #     print(min_exon_num)
    #     sys.exit()
    # if trans_obj.trans_id == '1_14_c147778/1/1455' and o_trans_obj.trans_id == '1_14_c81301/2/1370':
    #     print("blah blah")
    #     print(trans_comp_flag)
    #     print(start_match_list)
    #     print(end_match_list)
    #     print(start_diff_list)
    #     print(end_diff_list)
    #     print(min_exon_num)
    #     sys.exit()
    return trans_comp_flag,start_match_list,start_diff_list,end_match_list,end_diff_list,short_trans,long_trans,min_exon_num,diff_num_exon_flag
####################################################################################################
def sj_error_priority_start(sj_pre_error,sj_post_error):
    """
    Rank the error state around an exon START splice junction.
    Compares both error strings against the module-level no_mismatch_flag.
    Returns 0 (clean on both sides), 1 (pre-side errors only),
    2 (post-side errors only), or 3 (errors on both sides).
    """
    pre_clean = sj_pre_error == no_mismatch_flag
    post_clean = sj_post_error == no_mismatch_flag
    if post_clean and pre_clean:
        return 0
    if post_clean:
        return 1
    if pre_clean:
        return 2
    return 3
def sj_error_priority_end(sj_pre_error, sj_post_error):
    """
    Rank the error state around an exon END splice junction.
    Mirror of sj_error_priority_start with the single-sided priorities
    swapped: returns 0 (clean on both sides), 1 (post-side errors only),
    2 (pre-side errors only), or 3 (errors on both sides).
    """
    pre_clean = sj_pre_error == no_mismatch_flag
    post_clean = sj_post_error == no_mismatch_flag
    if pre_clean and post_clean:
        return 0
    if pre_clean:
        return 1
    if post_clean:
        return 2
    return 3
####################################################################################################
def sj_error_priority_finder(trans_obj,i,max_exon_num):
    """
    Determine the error priority (and error strings) at the boundaries of
    exon i of trans_obj.

    Parameters:
        trans_obj -- Transcript with exon lists and sj_pre/post_error_list set.
        i -- exon index in 3'->5' iteration order for "+" strand, genome order
             for "-" strand (matches the loop in compare_transcripts).
        max_exon_num -- exon count of the longer transcript in a comparison,
             or the string "none" to use trans_obj's own exon count.

    Returns (e_start_priority, e_end_priority, e_start_priority_error,
    e_end_priority_error) where priorities are 0 (terminal / clean) to 3
    (errors on both sides) and the error strings are "pre>post" pairs or "na".

    Relies on the module-level sj_priority_flag; "no_priority" forces both
    priorities to 0, anything other than "sj_priority" is a fatal error.
    Assumes sj_pre_error_list / sj_post_error_list have one entry per splice
    junction in genome order — TODO confirm against the sam-loop builder.
    """
    #figure out errors near splice junctions
    ######################################################
    e_start_list = trans_obj.exon_start_list
    e_end_list = trans_obj.exon_end_list
    strand = trans_obj.strand
    exon_num = len(e_start_list)
    if max_exon_num == "none":
        max_exon_num = exon_num
    sj_pre_error_list = trans_obj.sj_pre_error_list
    sj_post_error_list = trans_obj.sj_post_error_list
    #use these to record the error type
    e_start_priority_error = "na"
    e_end_priority_error = "na"
    priority_error_delimit = ">"
    if len(e_start_list) > 1: # if there are splice junctions to cover
        if strand == "+":
            # plus strand: i counts from the genomically last exon, so the
            # error lists are indexed from the back
            if i == 0: # last 3' exon according to genome
                e_end_priority = 0
                sj_pre_error = sj_pre_error_list[-1]
                sj_post_error = sj_post_error_list[-1]
                e_start_priority = sj_error_priority_start(sj_pre_error, sj_post_error)
                e_start_priority_error = sj_pre_error + priority_error_delimit + sj_post_error
            elif i > 0 and i < max_exon_num - 1: # middle exons
                sj_pre_error_start = sj_pre_error_list[-1 - i]
                sj_post_error_start = sj_post_error_list[-1 - i]
                sj_pre_error_end = sj_pre_error_list[-i]
                sj_post_error_end = sj_post_error_list[-i]
                e_start_priority = sj_error_priority_start(sj_pre_error_start, sj_post_error_start)
                e_end_priority = sj_error_priority_end(sj_pre_error_end, sj_post_error_end)
                e_start_priority_error = sj_pre_error_start + priority_error_delimit + sj_post_error_start
                e_end_priority_error = sj_pre_error_end + priority_error_delimit + sj_post_error_end
            elif i == max_exon_num - 1: # first 5' exon
                e_start_priority = 0
                sj_pre_error_end = sj_pre_error_list[-i]
                sj_post_error_end = sj_post_error_list[-i]
                e_end_priority = sj_error_priority_end(sj_pre_error_end, sj_post_error_end)
                e_end_priority_error = sj_pre_error_end + priority_error_delimit + sj_post_error_end
            else:
                print("Error with plus strand start and end priority")
                print(max_exon_num)
                print(i)
                sys.exit()
        elif strand == "-":
            # minus strand: i counts in genome order, error lists indexed
            # from the front
            if i == 0: # 5' first exon according to genome
                e_start_priority = 0
                sj_post_error = sj_post_error_list[0]
                sj_pre_error = sj_pre_error_list[0]
                e_end_priority = sj_error_priority_end(sj_pre_error, sj_post_error)
                e_end_priority_error = sj_pre_error + priority_error_delimit + sj_post_error
            elif i > 0 and i < max_exon_num - 1: # middle exons
                sj_pre_error_start = sj_pre_error_list[i-1]
                sj_post_error_start = sj_post_error_list[i-1]
                sj_pre_error_end = sj_pre_error_list[i]
                sj_post_error_end = sj_post_error_list[i]
                e_start_priority = sj_error_priority_start(sj_pre_error_start, sj_post_error_start)
                e_end_priority = sj_error_priority_end(sj_pre_error_end, sj_post_error_end)
                e_start_priority_error = sj_pre_error_start + priority_error_delimit + sj_post_error_start
                e_end_priority_error = sj_pre_error_end + priority_error_delimit + sj_post_error_end
            elif i == max_exon_num - 1: # last 3' exon according to genome
                e_end_priority = 0
                sj_pre_error_start = sj_pre_error_list[i - 1]
                sj_post_error_start = sj_post_error_list[i - 1]
                e_start_priority = sj_error_priority_start(sj_pre_error_start, sj_post_error_start)
                e_start_priority_error = sj_pre_error_start + priority_error_delimit + sj_post_error_start
            else:
                print("Error with minus strand start and end priority")
                print(max_exon_num)
                print(i)
                sys.exit()
    else: # if 1 exon read
        e_start_priority = 0
        e_end_priority = 0
    # you can turn priority on or off with arguments
    if sj_priority_flag == "no_priority":
        e_start_priority = 0
        e_end_priority = 0
    elif sj_priority_flag != "sj_priority":
        print("Error with splice junction priority flag. Please use no_priority or sj_priority.")
        sys.exit()
    return e_start_priority,e_end_priority,e_start_priority_error,e_end_priority_error
####################################################################################################
def length_error_type(error_string,error_report):
    """
    Decode one token of a splice-junction error string into (length, type).

    Token forms (example report: 0>0.C.A;0>0;1I_5M>0;1I_7M>0;8.A.T_7.T.A>8M_1D;0>0):
      "pos.ref.alt"  -> single-nucleotide mismatch, length 1, type "mismatch"
      "0"            -> no error, length 0, type "0"
      "<n><flag>"    -> cigar-style run where flag is I/D/M/S/H (checked in
                        that order of precedence), length n, type flag
    Any other shape is fatal: the token and full error_report are printed
    and the program exits.
    """
    if len(error_string.split(".")) == 3:
        # mismatch token always represents exactly one nt position
        return 1, "mismatch"
    if error_string == "0":
        return 0, "0"
    flag_phrases = {"I": "Insertion", "D": "Deletion", "M": "M", "S": "Soft clipping", "H": "Hard clipping"}
    error_type = "none"
    for cig_flag in ("I", "D", "M", "S", "H"):
        if cig_flag in error_string:
            error_type = cig_flag
            break
    if error_type == "none":
        print("Error with error_string: error type not recognized")
        print(error_string)
        print(error_report)
        sys.exit()
    token_parts = error_string.split(error_type)
    if len(token_parts) != 2:
        print("Error with error_string: " + flag_phrases[error_type] + " line issue")
        print(error_string)
        print(error_report)
        sys.exit()
    return int(token_parts[0]), error_type
def convert_int_list_to_string(int_list):
    """Return a new list with every element of int_list converted to str."""
    return [str(int_val) for int_val in int_list]
def _sj_error_side_profile(error_split):
    # Shared worker for simple_sj_error(): expands one side's error tokens
    # (mismatch "pos.ref.alt", "0" for an error-free side, or a cigar-style
    # token like "5M"/"1I") into one status character per nucleotide:
    # ses_match_char for a match, "X" for a mismatch, and "I"/"D"/"S"/"H"
    # verbatim for indels and clipping.  The original code repeated this
    # logic verbatim for the pre and post sides; it is factored out here.
    simple_list = []
    position_count = 1  # next 1-based position within the SJ window
    for error_string in error_split:
        token_fields = error_string.split(".")
        if len(token_fields) == 3:  # mismatch token: "position.ref.alt"
            mismatch_position = int(token_fields[0]) + 1
            # pad matches up to the mismatch position, then mark it.
            # (When position_count == 1 this equals mismatch_position - 1,
            # matching both branches of the original.)
            for _ in range(mismatch_position - position_count):
                simple_list.append(ses_match_char)
                position_count += 1
            simple_list.append("X")
            position_count += 1
        elif len(token_fields) == 1:
            if error_string == "0":
                # no error on this side: emit a full window of matches
                # (does not advance position_count, as in the original)
                for _ in range(sj_err_threshold):
                    simple_list.append(ses_match_char)
            else:
                # cigar-style token, e.g. "5M" or "1I"
                [cig_dig_list, cig_char_list] = cigar_list(error_string)
                cig_dig = int(cig_dig_list[0])
                cig_char = cig_char_list[0]
                if cig_char == "M":
                    fill_char = ses_match_char
                elif cig_char in ("I", "D", "S", "H"):
                    fill_char = cig_char
                else:
                    print("Error with LDE error char")
                    sys.exit()
                for _ in range(cig_dig):
                    simple_list.append(fill_char)
                    position_count += 1
    # right-pad with matches so the profile spans the whole window
    if len(simple_list) < sj_err_threshold:
        for _ in range(sj_err_threshold - len(simple_list)):
            simple_list.append(ses_match_char)
    return simple_list
def simple_sj_error(sj_pre_error_split,sj_post_error_split):
    """
    Build simple per-nucleotide error profile strings for both sides of a
    splice junction from their error token lists.

    Returns (sj_pre_error_simple_string, sj_post_error_simple_string), each
    sj_err_threshold characters of ses_match_char/"X"/"I"/"D"/"S"/"H".

    NOTE: sj_pre_error_split is reversed IN PLACE (caller-visible), exactly
    as the original implementation did; the downstream per-token counting
    loop in sj_error_local_density is order-independent, so only the
    iteration order is affected.
    """
    # The pre side's tokens run away from the junction, so process them in
    # reversed order and flip the resulting profile back afterwards.
    sj_pre_error_split.reverse()
    pre_profile = _sj_error_side_profile(sj_pre_error_split)
    pre_profile.reverse()
    sj_pre_error_simple_string = "".join(pre_profile)
    sj_post_error_simple_string = "".join(_sj_error_side_profile(sj_post_error_split))
    return sj_pre_error_simple_string,sj_post_error_simple_string
def sj_error_local_density(trans_obj):
    """
    Examine the error profile around every splice junction (SJ) of trans_obj
    and flag junctions with a high local density of errors (LDE).

    For each SJ, the upstream ("pre") and downstream ("post") error tokens
    are tallied by type (I/D/mismatch/S/H) via length_error_type(), and a
    per-nucleotide profile string is built via simple_sj_error().  A side
    whose total error count exceeds the module-level lde_threshold marks the
    SJ (and hence the transcript) as bad.

    Returns: bad_sj_flag (0 if all SJ pass), bad_sj_num_list,
    bad_sj_num_pre_list, bad_sj_num_post_list, bad_sj_error_count_list,
    lde_outline (tab-delimited report line).

    NOTE(review): assumes trans_obj exposes exon_start_list, exon_end_list,
    strand, cigar, sj_pre_error_list, sj_post_error_list, cluster_id,
    scaff_name, start_pos, end_pos as used below; uses Python 2 xrange.
    """
    # figure out errors near splice junctions
    ######################################################
    e_start_list = trans_obj.exon_start_list
    e_end_list = trans_obj.exon_end_list
    strand = trans_obj.strand
    exon_num = len(e_start_list)
    trans_cigar = trans_obj.cigar
    max_exon_num = exon_num
    # per-SJ error token lists, one entry per junction
    sj_pre_error_list = trans_obj.sj_pre_error_list
    sj_post_error_list = trans_obj.sj_post_error_list
    # use these to record the error type
    e_start_priority_error = "na"
    e_end_priority_error = "na"
    priority_error_delimit = ">"
    bad_sj_num_list = [] # list of the splice junctions that failed to pass quality threshold
    bad_sj_error_count_list = [] # number of errors at each SJ
    bad_sj_num_pre_list = []
    bad_sj_num_post_list = []
    bad_sj_flag = 0 # if 0 then no bad SJ, if higher than 0 then there are bad SJ
    sj_error_list = []
    sj_error_nuc_list = []
    all_sj_post_error_simple_list = []
    all_sj_pre_error_simple_list = []
    all_sj_both_error_simple_list = []
    # one iteration per splice junction (exons - 1)
    for i in xrange(max_exon_num-1):
        this_bad_sj_flag = 0
        # per-type nucleotide counts for each side of this SJ
        sj_pre_error_i_count = 0
        sj_post_error_i_count = 0
        sj_pre_error_d_count = 0
        sj_post_error_d_count = 0
        sj_pre_error_m_count = 0
        sj_post_error_m_count = 0
        sj_pre_error_s_count = 0
        sj_post_error_s_count = 0
        sj_pre_error_h_count = 0
        sj_post_error_h_count = 0
        sj_pre_error_all_line = ""
        sj_post_error_all_line = ""
        sj_pre_error = sj_pre_error_list[i]
        sj_post_error = sj_post_error_list[i]
        # raw token record for this SJ, e.g. "1I_5M>0"
        sj_error_nuc_string = sj_pre_error + ">" + sj_post_error
        sj_error_nuc_list.append(sj_error_nuc_string)
        # 0>0.C.A;0>0;1I_5M>0;1I_7M>0;8.A.T_7.T.A>8M_1D;0>0
        sj_pre_error_split = sj_pre_error.split("_")
        sj_post_error_split = sj_post_error.split("_")
        # per-nucleotide profile strings for both sides
        # (NOTE: simple_sj_error reverses sj_pre_error_split in place)
        sj_pre_error_simple_string,sj_post_error_simple_string = simple_sj_error(sj_pre_error_split, sj_post_error_split)
        all_sj_post_error_simple_list.append(sj_post_error_simple_string)
        all_sj_pre_error_simple_list.append(sj_pre_error_simple_string)
        all_sj_both_error_simple_list.append(sj_pre_error_simple_string + ">" +sj_post_error_simple_string)
        #print(e_start_list)
        #print(e_end_list)
        #print(sj_pre_error_list)
        #print(sj_post_error_list)
        # tally upstream-side error lengths by type
        for sj_pre_error_char in sj_pre_error_split:
            error_length,error_type = length_error_type(sj_pre_error_char,trans_cigar)
            #########################################################################################
            #########################################################################################
            #########################################################################################Continue here RK 2018/10/09
            if error_type == "mismatch":
                sj_pre_error_m_count = sj_pre_error_m_count + error_length
            elif error_type == "I":
                sj_pre_error_i_count = sj_pre_error_i_count + error_length
            elif error_type == "D":
                sj_pre_error_d_count = sj_pre_error_d_count + error_length
            elif error_type == "S":
                sj_pre_error_s_count = sj_pre_error_s_count + error_length
            elif error_type == "H":
                sj_pre_error_h_count = sj_pre_error_h_count + error_length
        # tally downstream-side error lengths by type
        for sj_post_error_char in sj_post_error_split:
            error_length,error_type = length_error_type(sj_post_error_char,trans_cigar)
            #####################################################Continue here RK 2018/10/09
            if error_type == "mismatch":
                sj_post_error_m_count = sj_post_error_m_count + error_length
            elif error_type == "I":
                sj_post_error_i_count = sj_post_error_i_count + error_length
            elif error_type == "D":
                sj_post_error_d_count = sj_post_error_d_count + error_length
            elif error_type == "S":
                sj_post_error_s_count = sj_post_error_s_count + error_length
            elif error_type == "H":
                sj_post_error_h_count = sj_post_error_h_count + error_length
        # totals per side; note matches ("M") are never counted as errors
        sj_pre_error_all_count = sj_pre_error_i_count + sj_pre_error_d_count + sj_pre_error_m_count + sj_pre_error_s_count + sj_pre_error_h_count
        sj_post_error_all_count = sj_post_error_i_count + sj_post_error_d_count + sj_post_error_m_count + sj_post_error_s_count + sj_post_error_h_count
        # report fields: I,D,mismatch,S,H counts for each side joined by ">"
        sj_pre_error_all_line = ",".join([str(sj_pre_error_i_count),str(sj_pre_error_d_count),str(sj_pre_error_m_count),str(sj_pre_error_s_count),str(sj_pre_error_h_count)])
        sj_post_error_all_line = ",".join([str(sj_post_error_i_count),str(sj_post_error_d_count),str(sj_post_error_m_count),str(sj_post_error_s_count),str(sj_post_error_h_count)])
        sj_all_error_all_line = ">".join([sj_pre_error_all_line,sj_post_error_all_line])
        sj_error_list.append(sj_all_error_all_line)
        # SJ numbering follows transcript orientation (5'->3')
        if strand == "+":
            sj_num = i + 1
        elif strand == "-":
            sj_num = max_exon_num - i - 1
        # flag this SJ if either side exceeds the local error threshold
        if sj_pre_error_all_count > lde_threshold:
            bad_sj_flag += 1
            this_bad_sj_flag = 1
            bad_sj_num_pre_list.append(sj_num)
        if sj_post_error_all_count > lde_threshold:
            bad_sj_flag += 1
            this_bad_sj_flag = 1
            bad_sj_num_post_list.append(sj_num)
        if this_bad_sj_flag > 0 :
            bad_sj_num_list.append(sj_num)
            sj_error_string = str(sj_pre_error_all_count) + ">" + str(sj_post_error_all_count)
            bad_sj_error_count_list.append(sj_error_string)
    # overall pass/fail flag for the report line
    sj_lde_flag = "na"
    if bad_sj_flag == 0:
        sj_lde_flag = "lde_pass"
    elif bad_sj_flag > 0:
        sj_lde_flag = "lde_fail"
    else:
        print("Error with sj_lde_flag")
        sys.exit()
    #prepare lde outline
    if len(sj_error_list) > 0:
        sj_error_line = ";".join(sj_error_list)
    elif len(sj_error_list) == 0:
        # single-exon transcript: no junctions to report
        sj_error_line = "na"
    else:
        print("Error with sj_error_line")
        sys.exit()
    bad_sj_num_str_list = convert_int_list_to_string(bad_sj_num_list)
    if len(bad_sj_num_str_list) > 0:
        bad_sj_num_line = ",".join(bad_sj_num_str_list)
    elif len(bad_sj_num_str_list) == 0:
        bad_sj_num_line = "0"
    else:
        print("Error with bad_sj_num_line")
        sys.exit()
    if len(bad_sj_error_count_list) > 0 :
        bad_sj_error_count_line = ",".join(bad_sj_error_count_list)
    elif len(bad_sj_error_count_list) == 0 :
        bad_sj_error_count_line = "na"
    else:
        print("Error with bad_sj_error_count_line")
        sys.exit()
    num_exon_str = str(max_exon_num)
    #####################################################
    #generate detailed error profile around splice junctions
    if len(sj_error_nuc_list) > 0:
        collapse_error_nuc_list_line = ";".join(sj_error_nuc_list)
    else:
        collapse_error_nuc_list_line = "na"
    if len(all_sj_both_error_simple_list) > 0:
        all_sj_both_error_simple_line = ";".join(all_sj_both_error_simple_list)
    else:
        all_sj_both_error_simple_line = "na"
    #####################################################
    # assemble the tab-delimited LDE report line
    lde_outlist = []
    lde_outlist.append(trans_obj.cluster_id)
    lde_outlist.append(sj_lde_flag)
    lde_outlist.append(trans_obj.scaff_name)
    lde_outlist.append(str(trans_obj.start_pos))
    lde_outlist.append(str(trans_obj.end_pos))
    lde_outlist.append(trans_obj.strand)
    lde_outlist.append(num_exon_str)
    lde_outlist.append(bad_sj_num_line)
    lde_outlist.append(bad_sj_error_count_line)
    lde_outlist.append(sj_error_line)
    lde_outlist.append(collapse_error_nuc_list_line)
    lde_outlist.append(all_sj_both_error_simple_line)
    lde_outlist.append(trans_obj.cigar)
    lde_outline = "\t".join(lde_outlist)
    return bad_sj_flag,bad_sj_num_list,bad_sj_num_pre_list,bad_sj_num_post_list,bad_sj_error_count_list,lde_outline
##############################################################
def collapse_transcripts(trans_obj_list,fiveprime_cap_flag,collapse_flag): #use this to collapse transcripts
    """
    Collapse a group of transcript objects into a single exon model.

    trans_obj_list: transcripts to merge; all must share one strand.
    fiveprime_cap_flag: "no_cap" means 5' ends may be degraded, so truncated
        5' exons are ignored and the longest observed 5' end is used.
    collapse_flag: "common_ends" picks the most supported outer ends;
        "longest_ends" picks the longest observed outer ends.

    Returns (one entry per collapsed exon, all ordered by coordinate):
    collapse_start_list, collapse_end_list, start_wobble_list,
    end_wobble_list, collapse_sj_start_err_list, collapse_sj_end_err_list,
    collapse_start_error_nuc_list, collapse_end_error_nuc_list.
    """
    # all supplied transcripts will be merged
    # create supplied transcripts by comparing with compare transcripts
    try:
        collapse_flag
    except NameError:
        print("collapse_flag not defined, using default of most common ends")
        # BUG FIX: the original used "collapse_flag == 'common_ends'" (a
        # no-op comparison), so the documented default was never assigned.
        collapse_flag = "common_ends"
    max_exon_num = 0
    strand = "none"
    num_trans = len(trans_obj_list)
    collapse_trans_id_list = []
    #get strand and max exon num
    for trans_obj in trans_obj_list:
        collapse_trans_id_list.append(trans_obj.trans_id)
        e_start_list = trans_obj.exon_start_list
        if strand == "none":
            strand = trans_obj.strand
        elif trans_obj.strand != strand:
            print("mismatch in strand from trans_obj_list for collapsing def")
            sys.exit()
        exon_num = len(e_start_list)
        if exon_num > max_exon_num:
            max_exon_num = exon_num
    collapse_start_list = []
    collapse_end_list = []
    #use these to record actual nuc mismatch info for collapsed models
    collapse_start_error_nuc_list = []
    collapse_end_error_nuc_list = []
    #use these to keep track of errors near splice junctions
    collapse_sj_start_err_list = []
    collapse_sj_end_err_list = []
    #track how much wobble for the starts and end in the collapse
    start_wobble_list = []
    end_wobble_list = []
    for i in xrange(max_exon_num): #go from 3 prime end
        if strand == "+":
            j = -1 * (i + 1) #iterate from last exon to account for possible 5' degradation for forward strand
        elif strand == "-":
            j = i # iterate from first exon for reverse strand
        e_start_dict = {} # e_start_dict[priority number][start] = number of occurrences
        e_end_dict = {} # e_end_dict[priority number][end] = number of occurrences
        priority_error_start_dict = {} #priority_error_start_dict[priority number][start][error string] = 1
        priority_error_end_dict = {} #priority_error_end_dict[priority number][end][error string] = 1
        #use these to get wobble despite sj priority algorithm
        e_start_range_list = []
        e_end_range_list = []
        for trans_obj in trans_obj_list:
            e_start_list = trans_obj.exon_start_list
            e_end_list = trans_obj.exon_end_list
            this_max_exon_num = len(e_start_list)
            if i >= len(e_start_list):# use for no cap when exon numbers may not match
                continue
            # SJ error priority: lower numbers mean cleaner junctions
            e_start_priority, e_end_priority, e_start_priority_error,e_end_priority_error = sj_error_priority_finder(trans_obj, i, this_max_exon_num)
            e_start = int(e_start_list[j])
            e_end = int(e_end_list[j])
            #Use this to prevent using truncated 5' end of trans without max exon num
            if fiveprime_cap_flag == "no_cap":
                #do not use 5' end if this is not priority level one for start
                if i == len(e_start_list)-1 and i < max_exon_num-1:
                    if strand == "+":
                        e_start = -1
                    elif strand == "-":
                        e_end = -1
            if e_start != -1: # check that it isnt truncated 5' end
                if e_start_priority not in e_start_dict:
                    e_start_dict[e_start_priority] = {}
                    priority_error_start_dict[e_start_priority] = {}
                if e_start not in e_start_dict[e_start_priority]:
                    e_start_dict[e_start_priority][e_start] = 0
                    priority_error_start_dict[e_start_priority][e_start] = {}
                e_start_range_list.append(e_start) #use these to get wobble despite sj priority algorithm
                e_start_dict[e_start_priority][e_start] += 1
                priority_error_start_dict[e_start_priority][e_start][e_start_priority_error] = 1
            else:
                if log_flag == "log_on":
                    print("truncated transcript")
            if e_end != -1: # check that it isnt truncated 5' end
                if e_end_priority not in e_end_dict:
                    e_end_dict[e_end_priority] = {}
                    priority_error_end_dict[e_end_priority] = {}
                if e_end not in e_end_dict[e_end_priority]:
                    e_end_dict[e_end_priority][e_end] = 0
                    priority_error_end_dict[e_end_priority][e_end] = {}
                e_end_range_list.append(e_end) #use these to get wobble despite sj priority algorithm
                e_end_dict[e_end_priority][e_end] += 1
                priority_error_end_dict[e_end_priority][e_end][e_end_priority_error] = 1
            else:
                if log_flag == "log_on":
                    print("truncated transcript")
        ##########################################
        # choose the best-supported exon start among the cleanest priority
        best_e_start = -1
        long_e_start = -1
        short_e_start = -1
        num_starts = 0
        priority_start_list = list(e_start_dict.keys())
        priority_end_list = list(e_end_dict.keys())
        priority_start_list.sort()
        priority_end_list.sort()
        best_start_priority = priority_start_list[0]
        best_end_priority = priority_end_list[0]
        collapse_sj_start_err_list.append(best_start_priority)
        collapse_sj_end_err_list.append(best_end_priority)
        # for starts, "long" is the smallest coordinate (5'-most on + strand)
        for e_start in e_start_dict[best_start_priority]:
            if e_start_dict[best_start_priority][e_start] > num_starts:
                best_e_start = e_start
                num_starts = e_start_dict[best_start_priority][e_start]
            if long_e_start == -1:
                long_e_start = e_start
            if e_start < long_e_start:
                long_e_start = e_start
            if short_e_start == -1:
                short_e_start = e_start
            if e_start > short_e_start:
                short_e_start = e_start
        # if there are multiple most num e starts then choose the longest one
        most_starts = num_starts
        num_most_starts = 0
        most_long_e_start = -1
        for e_start in e_start_dict[best_start_priority]:
            if e_start_dict[best_start_priority][e_start] == most_starts:
                num_most_starts += 1
                if most_long_e_start == -1:
                    most_long_e_start = e_start
                elif most_long_e_start > e_start:
                    most_long_e_start = e_start
        if num_most_starts > 1:
            best_e_start = most_long_e_start
            if num_trans > 2:
                if log_flag == "log_on":
                    print("more than one best e start! " + str(best_e_start) + " num_trans: " + str(num_trans))
        ##########################################
        # wobble = observed coordinate spread across all priorities
        e_start_range_list.sort()
        e_start_wobble = e_start_range_list[-1] - e_start_range_list[0]
        start_wobble_list.append(e_start_wobble)
        # choose the best-supported exon end; "long" is the largest coordinate
        best_e_end = 0
        long_e_end = -1
        short_e_end = -1
        num_ends = 0
        for e_end in e_end_dict[best_end_priority]:
            if e_end_dict[best_end_priority][e_end] > num_ends:
                best_e_end = e_end
                num_ends = e_end_dict[best_end_priority][e_end]
            if long_e_end == -1:
                long_e_end = e_end
            if e_end > long_e_end:
                long_e_end = e_end
            if short_e_end == -1:
                short_e_end = e_end
            if e_end < short_e_end:
                short_e_end = e_end
        # if there are multiple most num e ends then choose the longest one
        most_ends = num_ends
        num_most_ends = 0
        most_long_e_end = -1
        for e_end in e_end_dict[best_end_priority]:
            if e_end_dict[best_end_priority][e_end] == most_ends:
                num_most_ends += 1
                if most_long_e_end == -1:
                    most_long_e_end = e_end
                elif most_long_e_end < e_end:
                    most_long_e_end = e_end
        if num_most_ends > 1:
            best_e_end = most_long_e_end
            if num_trans > 2:
                if log_flag == "log_on":
                    print("more than one best e end! " + str(best_e_end) + " num_trans: " + str(num_trans))
        ##########################################
        e_end_range_list.sort()
        e_end_wobble = e_end_range_list[-1] - e_end_range_list[0]
        end_wobble_list.append(e_end_wobble)
        if fiveprime_cap_flag == "no_cap" and i+1 == max_exon_num: # use earliest 5' end for no cap libraries
            if strand == "+":
                best_e_start = long_e_start
            elif strand == "-":
                best_e_end = long_e_end
        if collapse_flag == "longest_ends": #allow user to choose whether to use the most common ends or the longest ends, default is most common ends
            if i+1 == max_exon_num:
                if strand == "+":
                    best_e_start = long_e_start
                elif strand == "-":
                    best_e_end = long_e_end
            if i == 0:
                if strand == "+":
                    best_e_end = long_e_end
                elif strand == "-":
                    best_e_start = long_e_start
        # sanity check: repair with the longest ends if possible, else abort
        if best_e_start > best_e_end:
            print("Error with collapsing, e_start bigger than e_end")
            print(best_e_start)
            print(best_e_end)
            print(trans_obj.trans_id)
            print(trans_obj.strand)
            print(e_start_dict)
            print(e_end_dict)
            for trans_obj in trans_obj_list:
                print(str(trans_obj.trans_id) + " ##########################")
                print(trans_obj.exon_start_list)
                print(trans_obj.exon_end_list)
            if long_e_start < long_e_end:
                best_e_start = long_e_start
                best_e_end = long_e_end
            else:
                print("Error long_e_start is greater than long_e_end")
                print(long_e_start)
                print(long_e_end)
                sys.exit()
        collapse_start_list.append(best_e_start)
        collapse_end_list.append(best_e_end)
        ##########################################################
        priority_error_start_list = list(priority_error_start_dict[best_start_priority][best_e_start].keys())
        priority_error_end_list = list(priority_error_end_dict[best_end_priority][best_e_end].keys())
        priority_error_start_line = "-".join(priority_error_start_list)
        priority_error_end_line = "-".join(priority_error_end_list)
        collapse_start_error_nuc_list.append(priority_error_start_line)
        collapse_end_error_nuc_list.append(priority_error_end_line)
    #put the coords in the right order maintaining order with wobble lists
    collapse_start_list, start_wobble_list = zip(*sorted(zip(collapse_start_list, start_wobble_list)))
    collapse_end_list, end_wobble_list = zip(*sorted(zip(collapse_end_list, end_wobble_list)))
    collapse_start_list = list(collapse_start_list)
    start_wobble_list = list(start_wobble_list)
    collapse_end_list = list(collapse_end_list)
    end_wobble_list = list(end_wobble_list)
    #######################################################
    #Below: check start and end list to make sure there are no overlapping coordinates
    prev_start = -1
    prev_end = -1
    for i in xrange(len(collapse_start_list)):
        check_start = collapse_start_list[i]
        check_end = collapse_end_list[i]
        if check_end <= check_start: # exon end must always be greater than exon start
            print("Error with exon end earlier than exon start")
            print(str(check_start) + "\t" + str(check_end))
            print(collapse_trans_id_list)
            sys.exit()
        if check_start <= prev_start: # next start should always be later than prev start
            print("Error with this exon start not later than previous start")
            print(str(prev_start) + "\t" + str(check_start))
            print(collapse_trans_id_list)
            sys.exit()
        if check_end <= prev_end: # next start should always be later than prev start
            print("Error with this exon end not later than previous end")
            print(str(prev_end) + "\t" + str(check_end))
            print(collapse_trans_id_list)
            sys.exit()
        prev_start = check_start
        prev_end = check_end
    # Above: check start and end list to make sure there are no overlapping coordinates
    #######################################################
    # NOTE: the sj err lists are kept in collection order (from the 3' end)
    # for both strands; they are intentionally not flipped by strand here.
    return collapse_start_list,collapse_end_list,start_wobble_list,end_wobble_list,collapse_sj_start_err_list,collapse_sj_end_err_list,collapse_start_error_nuc_list,collapse_end_error_nuc_list
####################################################################################################
def gene_group(trans_list): #groups trans into genes, does not take into account strand
    """
    Group transcript ids into genes by single-linkage exon overlap
    (strand is ignored).

    trans_list: transcript ids; objects are looked up in the module-level
        trans_obj_dict.

    Returns (gene_start_trans_dict, start_gene_list) where
    gene_start_trans_dict[gene start][trans id] = 1 and start_gene_list
    holds the gene start coordinates in ascending order.
    """
    trans_obj_list = []
    for trans_id in trans_list:
        trans_obj_list.append(trans_obj_dict[trans_id])
    gene_trans_dict = {} # gene_trans_dict[gene id][trans id] = 1
    trans_gene_dict = {} # trans_gene_dict[trans id] = gene group
    gene_start_dict = {} # gene_start_dict[gene num] = gene start
    start_gene_dict = {} # start_gene_dict[start] = gene num
    gene_count = 0
    # single transcript: it is its own gene (pairwise loop below is a no-op)
    if len(trans_list) == 1:
        gene_count += 1
        trans_id = trans_list[0]
        single_gene_start = trans_obj_list[0].exon_start_list[0]
        gene_start_dict[gene_count] = single_gene_start
        gene_trans_dict[gene_count] = {}
        gene_trans_dict[gene_count][trans_id] = 1
        trans_gene_dict[trans_id] = gene_count
    for i in range(len(trans_obj_list)):
        trans_obj = trans_obj_list[i]
        for j in range(i+1,len(trans_obj_list)):
            o_trans_obj = trans_obj_list[j]
            trans_id = trans_obj.cluster_id
            o_trans_id = o_trans_obj.cluster_id
            if trans_id in trans_gene_dict and o_trans_id in trans_gene_dict: # skip if already in same group
                if trans_gene_dict[trans_id] == trans_gene_dict[o_trans_id]:
                    continue
            exon_start_list = trans_obj.exon_start_list
            o_exon_start_list = o_trans_obj.exon_start_list
            exon_end_list = trans_obj.exon_end_list
            o_exon_end_list = o_trans_obj.exon_end_list
            num_exons = len(exon_start_list)
            o_num_exons = len(o_exon_start_list)
            overlap_flag = 0
            # FIX: the original reused "i" and "j" here, shadowing the
            # transcript-pair indices above; distinct names avoid the hazard
            # (behavior is unchanged).
            for exon_i in range(num_exons): #search for overlapping exons
                for exon_j in range(o_num_exons):
                    exon_start = exon_start_list[exon_i]
                    exon_end = exon_end_list[exon_i]
                    o_exon_start = o_exon_start_list[exon_j]
                    o_exon_end = o_exon_end_list[exon_j]
                    if exon_start <= o_exon_end and exon_end >= o_exon_start:
                        overlap_flag = 1
            if overlap_flag == 0: # no overlap make new gene groups
                if trans_id not in trans_gene_dict: #if no gene groups make new one
                    gene_count += 1
                    trans_gene_dict[trans_id] = gene_count
                    gene_trans_dict[gene_count] = {}
                    gene_trans_dict[gene_count][trans_id] = 1
                    #add gene start
                    gene_start_dict[gene_count] = exon_start_list[0]
                if o_trans_id not in trans_gene_dict: #if no gene groups make new one
                    gene_count += 1
                    trans_gene_dict[o_trans_id] = gene_count
                    gene_trans_dict[gene_count] = {}
                    gene_trans_dict[gene_count][o_trans_id] = 1
                    #add gene start
                    gene_start_dict[gene_count] = o_exon_start_list[0]
            if overlap_flag == 1:
                if trans_id not in trans_gene_dict and o_trans_id not in trans_gene_dict: #if no gene groups make new one
                    gene_count += 1
                    trans_gene_dict[trans_id] = gene_count
                    trans_gene_dict[o_trans_id] = gene_count
                    gene_trans_dict[gene_count] = {}
                    gene_trans_dict[gene_count][trans_id] = 1
                    gene_trans_dict[gene_count][o_trans_id] = 1
                    #add gene start
                    min_gene_start = exon_start_list[0]
                    if min_gene_start > o_exon_start_list[0]:
                        min_gene_start = o_exon_start_list[0]
                    gene_start_dict[gene_count] = min_gene_start
                elif trans_id not in trans_gene_dict: # add to other gene group
                    gene_num = trans_gene_dict[o_trans_id]
                    trans_gene_dict[trans_id] = gene_num
                    gene_trans_dict[gene_num][trans_id] = 1
                    min_gene_start = exon_start_list[0]
                    if min_gene_start > o_exon_start_list[0]:
                        min_gene_start = o_exon_start_list[0]
                    gene_start_dict[gene_num] = min_gene_start
                elif o_trans_id not in trans_gene_dict:# add to other gene group
                    gene_num = trans_gene_dict[trans_id]
                    trans_gene_dict[o_trans_id] = gene_num
                    gene_trans_dict[gene_num][o_trans_id] = 1
                    min_gene_start = exon_start_list[0]
                    if min_gene_start > o_exon_start_list[0]:
                        min_gene_start = o_exon_start_list[0]
                    gene_start_dict[gene_num] = min_gene_start
                elif trans_id in trans_gene_dict and o_trans_id in trans_gene_dict:
                    gene_num = trans_gene_dict[trans_id]
                    o_gene_num = trans_gene_dict[o_trans_id]
                    if gene_num != o_gene_num: #merge gene groups
                        m_trans_id_list = list(gene_trans_dict[o_gene_num].keys())
                        for m_trans_id in m_trans_id_list:
                            trans_gene_dict[m_trans_id] = gene_num
                            gene_trans_dict[gene_num][m_trans_id] = 1
                        #delete old gene num
                        gene_trans_dict.pop(o_gene_num, None)
                        min_gene_start = gene_start_dict[gene_num]
                        if min_gene_start > gene_start_dict[o_gene_num]:
                            min_gene_start = gene_start_dict[o_gene_num]
                        gene_start_dict[gene_num] = min_gene_start
                        #delete old gene num
                        gene_start_dict.pop(o_gene_num, None)
                    if gene_num == o_gene_num: #same gene groups
                        continue
                else:
                    print("Unknown condition in gene grouping")
                    sys.exit()
    for gene_num in gene_start_dict: #make dict for coordinate to gene num
        gene_start = gene_start_dict[gene_num]
        if gene_start in start_gene_dict:
            print("multiple gene starts!")
            sys.exit()
        start_gene_dict[gene_start] = gene_num
    # sorted() returns a list on both Python 2 and 3 (the original relied on
    # the Python-2-only dict.keys()-returns-a-list behavior)
    start_gene_list = sorted(start_gene_dict.keys())
    gene_start_trans_dict = {} # gene_start_trans_dict[gene start][trans id] = 1
    for gene_start in start_gene_list: # make dict for gene starts to trans
        gene_num = start_gene_dict[gene_start]
        gene_start_trans_dict[gene_start] = {}
        for trans_id in gene_trans_dict[gene_num]:
            gene_start_trans_dict[gene_start][trans_id] = 1
    return gene_start_trans_dict,start_gene_list
####################################################################################################
####################################################################################################
def iterate_sort_list(list_trans_pos_list,pos_index):
    """
    Recursively order rows of position fields that tie on earlier fields.

    list_trans_pos_list: list of rows (lists); each row's fields must be
        convertible via int().  Rows are assumed pre-sorted on pos_index so
        equal values sit on consecutive indices.
    pos_index: the field at which to look for adjacent ties.

    Mutates and returns [list_trans_pos_list, sort_flag]; sort_flag is 1
    once no adjacent rows tie at this level.  Exits the program if tied
    rows are not consecutive.
    """
    # sort the list by each element
    sort_flag = 0
    while sort_flag == 0:
        pre_pos = -1
        same_order_index_dict = {} # same_order_index_dict[pos][row index] = 1
        # collect positions and indices where adjacent rows tie at pos_index
        for j in range(len(list_trans_pos_list)):
            trans_pos_line_split = list_trans_pos_list[j]
            pos_element = trans_pos_line_split[pos_index]
            if pos_element == pre_pos:
                if pre_pos not in same_order_index_dict:
                    same_order_index_dict[pre_pos] = {}
                same_order_index_dict[pre_pos][j-1] = 1
                if pos_element not in same_order_index_dict:
                    same_order_index_dict[pos_element] = {}
                same_order_index_dict[pos_element][j] = 1
            pre_pos = pos_element
        same_order_index_list = list(same_order_index_dict.keys())
        same_order_index_list.sort()
        if len(same_order_index_list) == 0:
            # no ties at this level: fully ordered
            sort_flag = 1
        else:
            for pos_element in same_order_index_list:
                slice_pos_index_list = list(same_order_index_dict[pos_element].keys())
                slice_pos_index_list.sort()
                # tied rows must occupy consecutive indices
                past_pos = "na"
                for this_pos in slice_pos_index_list:
                    if past_pos == "na":
                        past_pos = this_pos
                    elif this_pos != past_pos + 1:
                        print("Error with non-consequtive indices for same pos in trans sorting.")
                        print(slice_pos_index_list)
                        sys.exit()
                    else:
                        past_pos = this_pos
                min_index = slice_pos_index_list[0]
                max_index = slice_pos_index_list[-1] + 1
                # replace the tied slice with one ordered by the next field,
                # recursing to break any remaining deeper ties; the recursive
                # sort_flag terminates the while loop
                list_slice = list_trans_pos_list[min_index:max_index]
                list_slice.sort(key=lambda x: int(x[pos_index + 1]))
                [list_slice, sort_flag] = iterate_sort_list(list_slice, pos_index + 1)
                list_trans_pos_list[min_index:max_index] = list_slice
    return [list_trans_pos_list,sort_flag]
####################################################################################################
def sort_pos_trans_list(pos_trans_list,pos_trans_dict):
    """
    Sort transcript position lines by their genomic coordinates.

    pos_trans_list: comma-delimited position strings
        ("start,end,exon starts/ends...").
    pos_trans_dict: maps each position string to its transcript object.

    Shorter lines are right-padded with zeros so every line has the same
    field count, then rows are ordered field by field via
    iterate_sort_list().

    Returns [sorted_trans_list, new_pos_trans_dict] where the dict is
    re-keyed by the padded position strings.
    """
    # longest position list defines the padded field count
    max_pos_num = max([0] + [len(line.split(",")) for line in pos_trans_list])
    new_pos_trans_dict = {}
    list_trans_pos_list = [] # this is a list of lists (one row per line)
    for trans_pos_line in pos_trans_list:
        trans_pos_fields = trans_pos_line.split(",")
        # pad with integer zeros so all rows have max_pos_num sortable fields
        trans_pos_fields.extend([0] * (max_pos_num - len(trans_pos_fields)))
        # re-key the trans obj dict by the padded position line
        padded_pos_line = ",".join([str(field) for field in trans_pos_fields])
        new_pos_trans_dict[padded_pos_line] = pos_trans_dict[trans_pos_line]
        list_trans_pos_list.append(trans_pos_fields)
    # primary sort on the first coordinate, then break ties field by field
    list_trans_pos_list.sort(key=lambda row: int(row[0]))
    [list_trans_pos_list, sort_flag] = iterate_sort_list(list_trans_pos_list, 0)
    if sort_flag != 1:
        print("Error with sort flag!")
        print(sort_flag)
        sys.exit()
    sorted_trans_list = [",".join([str(field) for field in row]) for row in list_trans_pos_list]
    return [sorted_trans_list,new_pos_trans_dict]
####################################################################################################
####################################################################################################
def sort_transcripts(trans_obj_list):
    """Sort collapsed transcript objects by their genomic coordinates.

    Builds a comma separated coordinate signature ("start,end,exon
    starts/ends...") for each transcript, detects transcripts whose collapsed
    models are identical (duplicate groups), and returns the transcript
    objects ordered by position via sort_pos_trans_list().

    Duplicate handling is controlled by the module-level duplicate_flag:
    "no_merge" aborts the run, "merge_dup" merges the clashing groups.

    Relies on module-level state: log_flag, duplicate_flag, trans_obj_dict,
    collapse_transcripts, fiveprime_cap_flag and collapse_flag.
    """
    #sort transcripts by start-end-exon starts
    pos_trans_dict = {} # pos_trans_dict[pos] = trans obj
    pos_trans_list = []
    for trans_obj in trans_obj_list:
        trans_scaff = trans_obj.scaff_name
        trans_exon_start_list = trans_obj.exon_start_list
        trans_exon_end_list = trans_obj.exon_end_list
        # NOTE: sorts the exon coordinate lists in place on the trans object
        trans_exon_start_list.sort()
        trans_exon_end_list.sort()
        trans_start = trans_exon_start_list[0]
        trans_end = trans_exon_end_list[-1]
        # build the signature: start,end then alternating exon start/end pairs
        trans_pos_list = []
        trans_pos_list.append(str(trans_start))
        trans_pos_list.append(",")
        trans_pos_list.append(str(trans_end))
        trans_pos_list.append(",")
        num_exons = len(trans_exon_start_list)
        for i in xrange(num_exons):
            exon_start = trans_exon_start_list[i]
            trans_pos_list.append(str(exon_start))
            trans_pos_list.append(",")
            exon_end = trans_exon_end_list[i]
            trans_pos_list.append(str(exon_end))
            trans_pos_list.append(",")
        # remove last element because it is a comma
        trans_pos_list.pop(-1)
        trans_pos_line = "".join(trans_pos_list)
        dup_detect_flag = 0 # use for signaling that a duplicate has been detected
        #if this trans model is already present in the dict then this is a duplicate
        if trans_pos_line in pos_trans_dict:
            dup_detect_flag = 1
            old_merge_id = pos_trans_dict[trans_pos_line].trans_id
            new_merge_id = trans_obj.trans_id
            #old_trans_list = pos_trans_dict[trans_pos_line].trans_list
            #new_trans_list = trans_obj.trans_list
            if log_flag == "log_on":
                print("Duplicate transcript positions in transcript sorting!")
                print(trans_obj.merged_trans_dict.keys())
                print(str(trans_start)+" "+str(trans_end))
                print(pos_trans_dict[trans_pos_line].merged_trans_dict.keys())
            this_bed_line = trans_obj.format_bed_line()
            other_bed_line = pos_trans_dict[trans_pos_line].format_bed_line()
            if log_flag == "log_on":
                print(this_bed_line)
                print(other_bed_line)
                print("a###########################################")
            # log the bed line of every source transcript in this group
            for a_uniq_trans_id in trans_obj.merged_trans_dict:
                a_bed_line = trans_obj_dict[a_uniq_trans_id].format_bed_line(a_uniq_trans_id)
                if log_flag == "log_on":
                    print(a_bed_line)
            if log_flag == "log_on":
                print("b###########################################")
            # log the bed line of every source transcript in the clashing group
            for b_uniq_trans_id in pos_trans_dict[trans_pos_line].merged_trans_dict:
                b_bed_line = trans_obj_dict[b_uniq_trans_id].format_bed_line(b_uniq_trans_id)
                if log_flag == "log_on":
                    print(b_bed_line)
            if log_flag == "log_on":
                print("end duplicate###########################################")
            #sys.exit()
            #################################################################################
            #################################################################################
            if duplicate_flag == "no_merge":
                print("By default TAMA collapse does not allow merging of duplicate transcript groups.")
                print("Duplicate transcript groups occur when different groupings of transcripts results in the same collapsed model.")
                print("If you would like to merge duplicate transcript groups please add -d merge_dup to the arguments.")
                sys.exit()
            elif duplicate_flag == "merge_dup":
                # add trans obj to merge group
                for a_uniq_trans_id in trans_obj.merged_trans_dict:
                    pos_trans_dict[trans_pos_line].add_merged_trans(trans_obj.merged_trans_dict[a_uniq_trans_id])
                match_trans_obj_list = []
                # collect trans obj in list
                for b_uniq_trans_id in pos_trans_dict[trans_pos_line].merged_trans_dict:
                    match_trans_obj_list.append(pos_trans_dict[trans_pos_line].merged_trans_dict[b_uniq_trans_id])
                #collapse_start_list, collapse_end_list, start_wobble_list, end_wobble_list, e_start_trans_dict, e_end_trans_dict = collapse_transcripts(match_trans_obj_list, collapse_flag)
                # re-collapse the combined group so wobble/error info covers all members
                collapse_start_list,collapse_end_list,start_wobble_list,end_wobble_list,collapse_sj_start_err_list,collapse_sj_end_err_list,collapse_start_error_nuc_list,collapse_end_error_nuc_list = collapse_transcripts(match_trans_obj_list,fiveprime_cap_flag,collapse_flag)
                # update merge info
                #pos_trans_dict[trans_pos_line].add_merge_info(collapse_start_list, collapse_end_list, start_wobble_list,end_wobble_list, e_start_trans_dict, e_end_trans_dict)
                pos_trans_dict[trans_pos_line].add_merge_info(collapse_start_list,collapse_end_list,start_wobble_list,end_wobble_list,collapse_sj_start_err_list,collapse_sj_end_err_list,collapse_start_error_nuc_list,collapse_end_error_nuc_list )
                # continue with the merged group object in place of this trans
                trans_obj = pos_trans_dict[trans_pos_line]
            else:
                print("Error with duplicate transcript group flag")
                sys.exit()
            #################################################################################
            #################################################################################
        if dup_detect_flag == 0:
            # first time this signature is seen: record it
            pos_trans_dict[trans_pos_line] = trans_obj
            pos_trans_list.append(trans_pos_line)
        elif dup_detect_flag == 1:
            # duplicate: re-point the signature at the (possibly merged) object
            pos_trans_dict[trans_pos_line] = trans_obj
        else:
            print("Error with dup_detect_flag.")
            sys.exit()
    [new_pos_trans_list,new_pos_trans_dict] = sort_pos_trans_list(pos_trans_list, pos_trans_dict)
    sorted_trans_obj_list = []
    for pos_trans in new_pos_trans_list:
        trans_obj = new_pos_trans_dict[pos_trans]
        sorted_trans_obj_list.append(trans_obj)
        tmp_id = trans_obj.trans_id
    # sorted_trans_obj_list list of trans obj that have been sorted by position
    return sorted_trans_obj_list
##############################################################################
def longest_transcript(trans_id,trans_id_list):
    """Classify how trans_id compares, on the 5' end, with a transcript group.

    Returns:
        "longest" -- trans_id has more exons than anything in trans_id_list,
                     or ties the max exon count with an equal-or-more-upstream
                     5' end.
        "long"    -- same exon count but its 5' end trails the group's best
                     within the wobble range (exon_diff_threshold).
        "short"   -- fewer exons, or a 5' end outside the wobble range.

    Relies on the module-level trans_obj_dict, fuzzy_match and
    exon_diff_threshold.
    """
    query_obj = trans_obj_dict[trans_id]
    strand = query_obj.strand
    query_start = query_obj.start_pos
    query_end = query_obj.end_pos
    query_num_exons = query_obj.num_exons

    verdict = "none"

    # Pass 1: find the group's best model -- max exon count, and among those
    # the furthest upstream 5' end for this strand.
    best_num_exons = 0
    best_tss_pos = -1
    for other_id in trans_id_list:
        other_obj = trans_obj_dict[other_id]
        other_num_exons = other_obj.num_exons
        if other_num_exons > best_num_exons:
            best_num_exons = other_num_exons
        if other_num_exons == best_num_exons:
            if strand == "+":
                if best_tss_pos == -1 or other_obj.start_pos < best_tss_pos:
                    best_tss_pos = other_obj.start_pos
            elif strand == "-":
                if best_tss_pos == -1 or other_obj.end_pos > best_tss_pos:
                    best_tss_pos = other_obj.end_pos
            else:
                print("Error with strands in longest_transcript")
                sys.exit()

    # Pass 2: compare the query against the group's best.
    if strand == "+":
        query_tss_pos = query_start
    elif strand == "-":
        query_tss_pos = query_end
    else:
        print("Error with strand in longest_transcript")
        sys.exit()

    if query_num_exons > best_num_exons:
        verdict = "longest"
    elif query_num_exons < best_num_exons:
        verdict = "short"
    else:
        # same exon count: decide by 5' end position
        if strand == "+":
            more_upstream = query_tss_pos < best_tss_pos
            more_downstream = query_tss_pos > best_tss_pos
        else:
            more_upstream = query_tss_pos > best_tss_pos
            more_downstream = query_tss_pos < best_tss_pos
        if more_upstream or query_tss_pos == best_tss_pos:
            verdict = "longest"
        elif more_downstream:
            # trailing 5' end: allow wobble within exon_diff_threshold
            start_match_flag,start_diff_num = fuzzy_match(query_tss_pos,best_tss_pos,exon_diff_threshold)
            if start_match_flag == "wobbly_match":
                verdict = "long"
            elif start_match_flag == "no_match":
                verdict = "short"
            else:
                print("Error with fuzzy_match in longest_transcript")
                print(start_match_flag)
                sys.exit()
        else:
            print("Error with trans_start/longest_tss_pos comparison in longest_transcript ")
            sys.exit()

    return verdict
##############################################################################
# Use this class to manage group merging
# Use this class to manage group merging
class TransGroup:
    """Tracks which collapse group each transcript belongs to.

    A "group" is a set of transcripts that will be collapsed together.
    Capped transcripts belong to exactly one group; nocap transcripts may
    belong to several groups at once.  Group ids are minted from a running
    counter (group_count).  Uses the module-level trans_obj_dict, log_flag
    and longest_transcript().
    """
    def __init__(self, group_name):
        self.group_name = group_name
        self.trans_group_dict = {} #trans_group_dict[trans id][trans group] = "longest" or "short" for no cap
        self.group_trans_dict = {} # group_trans_dict[group num][trans id] = "longest" or "short" for no cap
        self.group_count = 0 # running counter used to mint new group ids
        self.group_longest_dict = {} #group_longest_dict[group num][longest/long][trans] = 1
        self.group_max_exon_dict = {} # group_max_exon_dict[group num] = max exon num
    def check_trans_status(self,trans_a):
        """Return 1 if trans_a already belongs to any group, else 0."""
        # may add on for future features
        group_check = 0
        if trans_a in self.trans_group_dict:
            group_check = 1
        return group_check
    def check_same_group(self,trans_a,trans_b):
        """Return 1 if trans_a and trans_b share at least one group, else 0."""
        group_match_flag = 0
        for a_trans_group in self.trans_group_dict[trans_a]:
            if a_trans_group in self.trans_group_dict[trans_b]:
                group_match_flag = 1
        return group_match_flag
    def check_nocap_group(self,trans_a):
        """Count distinct other transcripts sharing any group with trans_a."""
        #checks the number of associated trans to a nocap group
        assoc_trans_dict = {} # assoc_trans_dict[trans id] = 1
        for a_trans_group in self.trans_group_dict[trans_a]:
            for trans_b in self.group_trans_dict[a_trans_group]:
                if trans_b != trans_a:
                    assoc_trans_dict[trans_b] = 1
        num_assoc_trans = len(list(assoc_trans_dict.keys()))
        return num_assoc_trans
    def delete_trans(self,trans_a):
        """Remove trans_a from every group, dropping groups left empty."""
        #delete trans from all its associated groups
        for a_trans_group in self.trans_group_dict[trans_a]:
            self.group_trans_dict[a_trans_group].pop(trans_a,None)
            if len(list(self.group_trans_dict[a_trans_group])) == 0:# if the group is now empty, delete it
                self.group_trans_dict.pop(a_trans_group,None)
        #delete trans from trans group dict
        self.trans_group_dict.pop(trans_a,None)
    def new_group_a(self,trans_a):
        """Create a brand new group containing only trans_a (as 'longest')."""
        if log_flag == "log_on":
            print("invoke new_group_a " + str(self.group_count) + " newgroup " + trans_a)
        self.group_count += 1
        if trans_a in self.trans_group_dict:
            print("Error in new group, trans_a already in group")
            sys.exit()
        if self.group_count in self.group_trans_dict:
            print("group num already used")
            sys.exit()
        self.trans_group_dict[trans_a] = {}
        self.trans_group_dict[trans_a][self.group_count] = "longest"
        self.group_trans_dict[self.group_count] = {}
        self.group_trans_dict[self.group_count][trans_a] = "longest"
        self.group_longest_dict[self.group_count] = {}
        self.group_longest_dict[self.group_count]["longest"] = {}
        self.group_longest_dict[self.group_count]["longest"][trans_a] = 1
        self.group_max_exon_dict[self.group_count] = trans_obj_dict[trans_a].num_exons ######################
    ##############################################################################
    def add_a_to_b_group(self,trans_a,trans_b):
        """Add trans_a (as 'short') to every group trans_b belongs to."""
        if log_flag == "log_on":
            print("invoke add_a_to_b_group " + trans_a + " " + trans_b)
        # only cap libs should be used for b group and they only have one group
        # does not take all of a_group just uses a_trans
        if len(list(self.trans_group_dict[trans_b].keys())) > 1:
            if log_flag == "log_on":
                print("multiple groups")
            #sys.exit()
        #check that trans b has more exons than trans a
        trans_obj_a = trans_obj_dict[trans_a]
        trans_obj_b = trans_obj_dict[trans_b]
        #if trans_obj_a.num_exons >= trans_obj_b.num_exons: ######################################## 2019/06/07
        #    print("Error trans_a does not have fewer exons than trans_b")
        #    print(trans_a + " " + trans_b)
        #    print(str(trans_obj_a.num_exons) + " " + str(trans_obj_b.num_exons))
        #    sys.exit()
        #remove initial nocap group that is a self identity group
        if len(list(self.trans_group_dict[trans_a].keys())) == 1:# if only one group
            a_trans_group = list(self.trans_group_dict[trans_a].keys())[0]
            if log_flag == "log_on":
                print(trans_a)
            if len(list(self.group_trans_dict[a_trans_group].keys())) == 1: #if only this in one group
                if list(self.group_trans_dict[a_trans_group].keys())[0] == trans_a:
                    self.group_trans_dict.pop(a_trans_group,None)
                    self.trans_group_dict.pop(trans_a,None)
        if trans_a not in self.trans_group_dict:
            self.trans_group_dict[trans_a] = {}
        else:
            # this happens if trans_a is a nocap trans in which case it can be in multiple groups
            if log_flag == "log_on":
                print("trans_a already in group, should be nocap trans: " + trans_a)
        for b_group_num in list(self.trans_group_dict[trans_b].keys()):
            # add a trans to b group
            self.trans_group_dict[trans_a][b_group_num] = "short"
            self.group_trans_dict[b_group_num][trans_a] = "short"
        # a_trans has fewer exons than b_trans so it must be short
        ## search through a groups for group mergings
        ## if a is longest in any of it's groups then you can add other trans from those groups to b
        #for a_group_num in list(self.trans_group_dict[trans_a].keys()):
        #    if a_group_num == b_group_num: ######################################## 2019/06/07
        #        continue
        #
        #    if log_flag == "log_on":
        #        print(str(a_group_num) + "-a and b group num-" + str(b_group_num))
        #    # if trans_a is the longest in group
        #    #add all shorter transcripts from a group to b group too
        #    if trans_a in self.group_longest_dict[a_group_num]["longest"]:
        #        a_trans_id_list = list(self.group_trans_dict[a_group_num].keys())
        #        for a_group_trans in a_trans_id_list:
        #            self.trans_group_dict[a_group_trans][b_group_num] = "short"
        #            self.group_trans_dict[b_group_num][a_group_trans] = "short"
        #            self.trans_group_dict[a_group_trans].pop(a_group_num,None)
        #        self.group_trans_dict.pop(a_group_num,None)
        #        self.group_longest_dict.pop(a_group_num,None)
        # dont need to redo longest and short because added a trans is all short compared to b group
    ##############################################################################
    def merge_a_b_groups(self,trans_a,trans_b):
        """Merge the (single) groups of capped trans_a and trans_b.

        The smaller group is folded into the larger one.
        """
        if log_flag == "log_on":
            print("invoke merge_a_b_groups "+ str(self.group_count )+ " " + trans_a + " " +trans_b )
        #self.group_count += 1
        #only cap lib trans should be used for merging groups
        if len(list(self.trans_group_dict[trans_a].keys())) > 1:
            print("multiple groups a")
            sys.exit()
        if len(list(self.trans_group_dict[trans_b].keys())) > 1:
            print("multiple groups b")
            sys.exit()
        a_group_num = list(self.trans_group_dict[trans_a].keys())[0]
        b_group_num = list(self.trans_group_dict[trans_b].keys())[0]
        if a_group_num == b_group_num:
            print("Error, groups are the same, no need to merge!")
            sys.exit()
        #if self.group_count in self.group_trans_dict:
        #    print("group num already used")
        #    sys.exit()
        #find bigger group
        num_trans_group_a = len(list(self.group_trans_dict[a_group_num].keys()))
        num_trans_group_b = len(list(self.group_trans_dict[b_group_num].keys()))
        merge_group_num = -1
        # NOTE(review): membership flags are reset to 1 here (not
        # "longest"/"short"), and the surviving group keeps only its own
        # max-exon count -- the absorbed group's exon count is not folded in.
        # Downstream capped code appears to only test membership; confirm.
        if num_trans_group_a > num_trans_group_b:
            merge_group_num = a_group_num
            self.group_max_exon_dict[merge_group_num] = self.group_max_exon_dict[a_group_num]
            for group_trans in self.group_trans_dict[b_group_num]:
                self.group_trans_dict[merge_group_num][group_trans] = 1
                self.trans_group_dict[group_trans].pop(b_group_num, None)
                self.trans_group_dict[group_trans][merge_group_num] = 1
            #remove old group
            self.group_trans_dict.pop(b_group_num, None)
        elif num_trans_group_b >= num_trans_group_a:
            merge_group_num = b_group_num
            self.group_max_exon_dict[merge_group_num] = self.group_max_exon_dict[b_group_num]
            for group_trans in self.group_trans_dict[a_group_num]:
                self.group_trans_dict[merge_group_num][group_trans] = 1
                self.trans_group_dict[group_trans].pop(a_group_num, None)
                self.trans_group_dict[group_trans][merge_group_num] = 1
            #remove old group
            self.group_trans_dict.pop(a_group_num, None)
        else:
            print("Error with comparing group trans counts")
            sys.exit()
        #add longest trans information
        #trans_id_list = list(self.group_trans_dict[merge_group_num].keys())
        #redo longest trans flags
        #for trans_c in self.group_trans_dict[merge_group_num]:
        #    longest_trans_flag = longest_transcript(trans_c,trans_id_list)
        #    self.trans_group_dict[trans_c][merge_group_num] = longest_trans_flag
        #    self.group_trans_dict[merge_group_num][trans_c] = longest_trans_flag
        if merge_group_num == -1:
            print("Error with merge groups, merge_group_num == -1")
            sys.exit()
    ##############################################################################
    def merge_a_b_groups_nocap(self,trans_a,trans_b):
        """Merge the groups of nocap trans_a and trans_b into a new group.

        Only groups whose max exon count matches the transcript's own exon
        count are folded in; longest/long/short flags are recomputed for the
        new group.
        """
        # need to create new group and cant use time saving thing in capped merge
        # this is because nocaps can have multiple groups
        self.group_count += 1
        if log_flag == "log_on":
            print("invoke merge_a_b_groups_nocap " + str(self.group_count )+ " " + trans_a + " " +trans_b )
        #only cap lib trans should be used for merging groups
        if len(list(self.trans_group_dict[trans_a].keys())) > 1:
            if log_flag == "log_on":
                print("multiple groups a nocap")
        if len(list(self.trans_group_dict[trans_b].keys())) > 1:
            if log_flag == "log_on":
                print("multiple groups b nocap")
        #check that trans b has same num exons as trans a
        trans_obj_a = trans_obj_dict[trans_a]
        trans_obj_b = trans_obj_dict[trans_b]
        if trans_obj_a.num_exons != trans_obj_b.num_exons:
            print("Error trans_a does not same num exons as trans_b")
            print(trans_a + " " + trans_b)
            print(str(trans_obj_a.num_exons) + " " + str(trans_obj_b.num_exons))
            sys.exit()
        a_group_num_list = list(self.trans_group_dict[trans_a].keys())
        if self.group_count in self.group_trans_dict:
            print("group num already used")
            sys.exit()
        #make new group
        self.group_trans_dict[self.group_count] = {}
        self.group_max_exon_dict[self.group_count] = 0
        for a_group_num in a_group_num_list:
            if self.group_max_exon_dict[a_group_num] == trans_obj_dict[trans_a].num_exons: # if this trans has as many exons as longest trans in group
                #if self.group_trans_dict[a_group_num][trans_a].startswith("long"): # could be longest or long
                for group_trans in self.group_trans_dict[a_group_num]:
                    self.group_trans_dict[self.group_count][group_trans] = 1 #make new group
                    self.trans_group_dict[group_trans].pop(a_group_num, None) # remove old group num
                    self.trans_group_dict[group_trans][self.group_count] = 1 #add new group num
                self.group_max_exon_dict[self.group_count] = self.group_max_exon_dict[a_group_num]
                #remove old group
                self.group_trans_dict.pop(a_group_num, None)
                self.group_longest_dict.pop(a_group_num, None)
        # get b group list after a group processing because
        # some of b groups could have been same as a groups
        # thus they would have been deleted
        b_group_num_list = list(self.trans_group_dict[trans_b].keys())
        for b_group_num in b_group_num_list:
            if self.group_max_exon_dict[b_group_num] == trans_obj_dict[trans_b].num_exons: # if this trans has as many exons as longest trans in group
                #if self.group_trans_dict[b_group_num][trans_b].startswith("long"): # could be longest or long
                for group_trans in self.group_trans_dict[b_group_num]:
                    self.group_trans_dict[self.group_count][group_trans] = 1 # make new group
                    self.trans_group_dict[group_trans].pop(b_group_num, None) # remove old group num
                    self.trans_group_dict[group_trans][self.group_count] = 1 # add new group num
                if self.group_max_exon_dict[self.group_count] < self.group_max_exon_dict[b_group_num]:
                    self.group_max_exon_dict[self.group_count] = self.group_max_exon_dict[b_group_num]
                #remove old group
                self.group_trans_dict.pop(b_group_num, None)
                self.group_longest_dict.pop(b_group_num, None)
        #add longest trans information
        trans_id_list = list(self.group_trans_dict[self.group_count].keys())
        if len(trans_id_list) > 0:# new group was made
            #redo longest trans flags
            #get longest trans in new group
            longest_trans_id = trans_id_list[0]
            for check_trans_id in trans_id_list:
                longest_trans_flag = longest_transcript(check_trans_id,[longest_trans_id])
                if longest_trans_flag == "longest":
                    longest_trans_id = check_trans_id
            #refresh trans flags in new group
            self.group_longest_dict[self.group_count] = {}
            self.group_longest_dict[self.group_count]["longest"] = {}
            for trans_c in trans_id_list:
                longest_trans_flag = longest_transcript(trans_c,[longest_trans_id])
                self.trans_group_dict[trans_c][self.group_count] = longest_trans_flag
                self.group_trans_dict[self.group_count][trans_c] = longest_trans_flag
                if longest_trans_flag == "longest":
                    self.group_longest_dict[self.group_count]["longest"][trans_c] = 1
        else: # no new group was made, merge did not happen, 2 short transcripts
            self.group_trans_dict.pop(self.group_count, None)
###################################### <<< edge of work
####################################################################################################
def simplify_gene_capped(trans_obj_list,fiveprime_cap_flag): # goes through transcripts in gene and groups transcripts for collapsing
    """Group the capped transcripts of one gene into collapse groups.

    Repeatedly picks a "hunter" transcript (first from the ungrouped pool,
    then from the already-grouped-but-unsearched pool) and compares it against
    every still-ungrouped "prey" transcript; prey whose model is judged
    "same_transcript" by compare_transcripts() are merged into the hunter's
    group and later take a turn as hunter themselves.

    Returns (trans_group_dict, group_trans_dict) taken from the TransGroup.
    Relies on module-level log_flag, compare_transcripts and TransGroup.
    """
    #for capped only!!
    if log_flag == "log_on":
        print("invoking simplify_gene_capped")
    transgroup = TransGroup("transgroup")
    #trans_group_dict = {} # trans_group_dict[trans id] = trans group
    #group_trans_dict = {} # group_trans_dict[group num][trans id] = 1
    #group_num = 0
    #new cluster grouping algorithm
    ############################################################################################################
    ###########################
    # convert trans_obj_list to trans_obj_dict for while looping
    # clusters that have yet been grouped
    ungrouped_trans_obj_dict = {} # ungrouped_trans_obj_dict[cluster_id] = trans_obj
    # clusters that have been grouped
    grouped_trans_obj_dict = {} # grouped_trans_obj_dict[cluster_id] = trans_obj
    # grouped clusters that have been not been searched/been used as a hunter
    unsearched_trans_obj_dict = {} # unsearched_trans_obj_dict[cluster_id] = trans_obj
    #############
    # create SJ Hash for transcript models if there are many reads 2020/07/27
    num_group_reads = len(trans_obj_list)
    #if num_group_reads > sj_hash_read_threshold - 1:
    #    for trans_obj in trans_obj_list:
    #        #trans_obj.make_sj_hash_int()
    #        trans_obj.make_sj_hash_string()
    #############
    for trans_obj in trans_obj_list:
        ungrouped_trans_obj_dict[trans_obj.cluster_id] = trans_obj
    ###########################
    #all_trans_id_dict = {} # all_trans_id_dict[trans id] = 1
    ungrouped_count = len(trans_obj_list)
    unsearched_count = 0
    while ungrouped_count > 0:
        if unsearched_count == 0:
            # no pending hunters: seed a fresh search with the first
            # (sorted) ungrouped cluster
            ungrouped_cluster_list = list(ungrouped_trans_obj_dict.keys())
            ungrouped_cluster_list.sort()
            # hunter trans and prey trans, hunter used to look for prey
            hunter_cluster_id = ungrouped_cluster_list[0]
            hunter_trans_obj = ungrouped_trans_obj_dict[hunter_cluster_id]
            # remove hunter from ungrouped
            ungrouped_trans_obj_dict.pop(hunter_cluster_id)
            unsearched_count = 1
        while unsearched_count > 0 and ungrouped_count > 0:
            if hunter_cluster_id == "new_hunter":
                # previous hunter finished: take the next grouped-but-unsearched cluster
                unsearched_cluster_list = list(unsearched_trans_obj_dict.keys())
                unsearched_cluster_list.sort()
                # hunter trans and prey trans, hunter used to look for prey
                hunter_cluster_id = unsearched_cluster_list[0]
                hunter_trans_obj = unsearched_trans_obj_dict[hunter_cluster_id]
                unsearched_trans_obj_dict.pop(hunter_cluster_id)
            #all_trans_id_dict[hunter_cluster_id] = i
            hunter_strand = hunter_trans_obj.strand
            hunter_num_exons = hunter_trans_obj.num_exons
            a_group_check = transgroup.check_trans_status(hunter_cluster_id)
            # make groups for each transcript if no group
            if a_group_check != 1:
                transgroup.new_group_a(hunter_cluster_id)
            for prey_cluster_id in ungrouped_trans_obj_dict:
                prey_trans_obj = ungrouped_trans_obj_dict[prey_cluster_id]
                prey_strand = prey_trans_obj.strand
                prey_num_exons = prey_trans_obj.num_exons
                # this condition should not happen anymore because i delete hunters after I use them
                if hunter_cluster_id == prey_cluster_id:
                    continue
                # check strand match
                if hunter_strand != prey_strand:
                    print("Strand of transcripts within gene do not match")
                    sys.exit()
                b_group_check = transgroup.check_trans_status(prey_cluster_id)
                # make groups for each transcript if no group
                if b_group_check != 1:
                    transgroup.new_group_a(prey_cluster_id)
                group_match_flag = transgroup.check_same_group(hunter_cluster_id, prey_cluster_id)
                # this shoudn't be needed anymore due to new dict based group search
                if group_match_flag == 1: # if they are both in the same group
                    continue
                trans_comp_flag, start_match_list, start_diff_list, end_match_list, end_diff_list, short_trans, long_trans, min_exon_num, diff_num_exon_flag = compare_transcripts(hunter_trans_obj, prey_trans_obj, fiveprime_cap_flag, hunter_strand)
                ######added to speed things up
                #if num_group_reads < sj_hash_read_threshold or hunter_num_exons < 3:
                #    trans_comp_flag, start_match_list, start_diff_list, end_match_list, end_diff_list, short_trans, long_trans, min_exon_num, diff_num_exon_flag = compare_transcripts(hunter_trans_obj, prey_trans_obj, fiveprime_cap_flag, hunter_strand)
                #
                #else:
                #
                #    exact_match_flag = exact_match_capped(hunter_trans_obj, prey_trans_obj,hunter_strand)
                #
                #    if exact_match_flag == "exact_match":
                #        trans_comp_flag = "same_transcript"
                #
                #    elif hunter_num_exons == 1: # exact match is the same as compare for single exon transcripts
                #        trans_comp_flag = 'diff_transcripts'
                #
                #    elif exact_match_flag == "not_exact":
                #        trans_comp_flag, start_match_list, start_diff_list, end_match_list, end_diff_list, short_trans, long_trans, min_exon_num, diff_num_exon_flag = compare_transcripts(hunter_trans_obj, prey_trans_obj, fiveprime_cap_flag, hunter_strand)
                #
                #    else:
                #        print("Error with exact match output")
                #        sys.exit()
                # old system used to generalize for nocap mode but this slows down capped mode
                #trans_match_flag = 0
                # same_transcript means clusters should be grouped for collapsing!
                #if trans_comp_flag == "same_transcript":
                #    trans_match_flag = 1
                #a_group_check = transgroup.check_trans_status(hunter_cluster_id)
                #b_group_check = transgroup.check_trans_status(prey_cluster_id)
                ##########################################Affects all downstream code!
                if trans_comp_flag != "same_transcript": # skip if there is no match
                    continue
                else: # if they are in different groups, merge groups, applies to both caps
                    transgroup.merge_a_b_groups(hunter_cluster_id, prey_cluster_id)
                    # remove the prey cluster from dict to avoid redundant searching
                    # ungrouped_trans_obj_dict.pop(prey_cluster_id) # remove this outside of for loop
                    # add prey to unsearched dict
                    unsearched_trans_obj_dict[prey_cluster_id] = prey_trans_obj
            # remove grouped prey from ungrouped dict
            for unsearched_cluster_id in unsearched_trans_obj_dict:
                if unsearched_cluster_id in ungrouped_trans_obj_dict:
                    ungrouped_trans_obj_dict.pop(unsearched_cluster_id)
            unsearched_count = len(unsearched_trans_obj_dict)
            # reset hunter id
            hunter_cluster_id = "new_hunter"
            ungrouped_count = len(ungrouped_trans_obj_dict)
    ############################################################################################################
    # End of new cluster grouping algorithm
    trans_group_dict = transgroup.trans_group_dict
    group_trans_dict = transgroup.group_trans_dict
    return trans_group_dict,group_trans_dict
#####################################################################################################
def simplify_gene_nocap(trans_obj_list,fiveprime_cap_flag): # goes through transcripts in gene and groups transcripts for collapsing
# For nocap only!
if log_flag == "log_on":
print("invoking simplify_gene_nocap")
transgroup = TransGroup("transgroup")
#############
# create SJ Hash for transcript models if there are many reads 2020/07/27
num_group_reads = len(trans_obj_list)
#if num_group_reads > sj_hash_read_threshold - 1:
# for trans_obj in trans_obj_list:
# #trans_obj.make_sj_hash_int()
# trans_obj.make_sj_hash_string()
#############
#new cluster grouping algorithm
############################################################################################################
###########################
# convert trans_obj_list to trans_obj_dict for while looping
# clusters that have yet been grouped
ungrouped_trans_obj_dict = {} # ungrouped_trans_obj_dict[cluster_id] = trans_obj
# clusters that have been grouped
grouped_trans_obj_dict = {} # grouped_trans_obj_dict[cluster_id] = trans_obj
# grouped clusters that have been not been searched/been used as a hunter
unsearched_trans_obj_dict = {} # unsearched_trans_obj_dict[cluster_id] = trans_obj
# use this dict to organize clusters by num exons
exon_trans_obj_dict = {} # exon_trans_obj_dict[num exons][cluster id] = trans_obj
# use this to refresh sub exon dicts
sub_exon_cluster_dict = {} # sub_exon_cluster_dict[num exons][cluster id] = 1
for trans_obj in trans_obj_list:
#ungrouped_trans_obj_dict[trans_obj.cluster_id] = trans_obj
this_strand = trans_obj.strand
this_num_exons = trans_obj.num_exons
if this_num_exons not in exon_trans_obj_dict:
exon_trans_obj_dict[this_num_exons] = {}
sub_exon_cluster_dict[this_num_exons] = {}
exon_trans_obj_dict[this_num_exons][trans_obj.cluster_id] = trans_obj
sub_exon_cluster_dict[this_num_exons][trans_obj.cluster_id] = 1
###########################
#all_trans_id_dict = {} # all_trans_id_dict[trans id] = 1
ungrouped_count = len(trans_obj_list)
unsearched_count = 0
exon_num_list = list(exon_trans_obj_dict.keys())
exon_num_list.sort(reverse=True)
max_num_exons = exon_num_list[0]
#exon_num_level = exon_num_list[0]
exon_num_index = 0
sub_length_cluster_dict = {} # sub_length_cluster_dict[cluster id] = 1 use this to mark degraded clusters
while exon_num_index < len(exon_num_list):
exon_num_level = exon_num_list[exon_num_index]
#initialize ungrouped for this exon num level
ungrouped_trans_obj_dict = {}
for ungroup_cluster_id in exon_trans_obj_dict[exon_num_level]:
ungrouped_trans_obj_dict[ungroup_cluster_id] = exon_trans_obj_dict[exon_num_level][ungroup_cluster_id]
ungrouped_count = len(ungrouped_trans_obj_dict)
while ungrouped_count > 0:
if unsearched_count == 0:
ungrouped_cluster_list = list(ungrouped_trans_obj_dict.keys())
ungrouped_cluster_list_sorted = []
ungrouped_strand = ungrouped_trans_obj_dict[ungrouped_cluster_list[0]].strand
# sort by 5' end length to deal with issue of long based nocap merging
ungrouped_cluster_sort_dict = {} # ungrouped_cluster_sort_dict[start][cluster id] = 1
for ungrouped_cluster_id in ungrouped_cluster_list:
if ungrouped_strand == "+":
ungrouped_five_prime = ungrouped_trans_obj_dict[ungrouped_cluster_id].start_pos ####################################################
elif ungrouped_strand == "-":
ungrouped_five_prime = ungrouped_trans_obj_dict[ungrouped_cluster_id].end_pos
else:
print("Error with ungrouped_strand")
sys.exit()
if ungrouped_five_prime not in ungrouped_cluster_sort_dict:
ungrouped_cluster_sort_dict[ungrouped_five_prime] = {}
ungrouped_cluster_sort_dict[ungrouped_five_prime][ungrouped_cluster_id] = 1
ungrouped_cluster_five_list = list(ungrouped_cluster_sort_dict.keys())
if ungrouped_strand == "+":
ungrouped_cluster_five_list.sort()
elif ungrouped_strand == "-":
ungrouped_cluster_five_list.sort(reverse=True)
else:
print("Error with ungrouped_strand")
sys.exit()
for ungrouped_five_coord in ungrouped_cluster_five_list:
for ungrouped_cluster_id in list(ungrouped_cluster_sort_dict[ungrouped_five_coord].keys()):
ungrouped_cluster_list_sorted.append(ungrouped_cluster_id)
ungrouped_cluster_list = ungrouped_cluster_list_sorted
# hunter trans and prey trans, hunter used to look for prey
hunter_cluster_id = ungrouped_cluster_list[0]
hunter_trans_obj = ungrouped_trans_obj_dict[hunter_cluster_id]
# remove hunter from ungrouped
ungrouped_trans_obj_dict.pop(hunter_cluster_id)
unsearched_count = 1
#use this to keep track of sub clusters that have not already been grouped with this group
this_sub_exon_cluster_dict = sub_exon_cluster_dict
while unsearched_count > 0:
if hunter_cluster_id == "new_hunter":
unsearched_cluster_list = list(unsearched_trans_obj_dict.keys())
unsearched_cluster_list.sort()
if len(unsearched_cluster_list) == 0:
print("empty unsearched_cluster_list")
print(hunter_cluster_id)
print(ungrouped_trans_obj_dict)
print(unsearched_count)
print(ungrouped_count)
# hunter trans and prey trans, hunter used to look for prey
hunter_cluster_id = unsearched_cluster_list[0]
hunter_trans_obj = unsearched_trans_obj_dict[hunter_cluster_id]
unsearched_trans_obj_dict.pop(hunter_cluster_id)
# print("hunter: " + hunter_cluster_id )
#print(exon_num_list)
#all_trans_id_dict[hunter_cluster_id] = i
hunter_strand = hunter_trans_obj.strand
hunter_num_exons = hunter_trans_obj.num_exons
a_group_check = transgroup.check_trans_status(hunter_cluster_id)
# make groups for each transcript if no group
if a_group_check != 1:
transgroup.new_group_a(hunter_cluster_id)
ungrouped_trans_obj_list = list(ungrouped_trans_obj_dict.keys())
unsearched_cluster_list = list(unsearched_trans_obj_dict.keys())
# search at same exon num level
###############################################################################
if hunter_cluster_id not in sub_length_cluster_dict: # only search at same level if this has not been grouped with longer transcript
for prey_cluster_id in ungrouped_trans_obj_dict:
prey_trans_obj = ungrouped_trans_obj_dict[prey_cluster_id]
prey_strand = prey_trans_obj.strand
prey_num_exons = prey_trans_obj.num_exons
# this condition should not happen anymore because i delete hunters after I use them
if hunter_cluster_id == prey_cluster_id:
continue
# check strand match
if hunter_strand != prey_strand:
print("Strand of transcripts within gene do not match")
sys.exit()
b_group_check = transgroup.check_trans_status(prey_cluster_id)
# make groups for each transcript if no group
if b_group_check != 1:
transgroup.new_group_a(prey_cluster_id)
group_match_flag = transgroup.check_same_group(hunter_cluster_id, prey_cluster_id)
# this shoudn't be needed anymore due to new dict based group search
if group_match_flag == 1: # if they are both in the same group
continue
trans_comp_flag, start_match_list, start_diff_list, end_match_list, end_diff_list, short_trans, long_trans, min_exon_num, diff_num_exon_flag = compare_transcripts(hunter_trans_obj, prey_trans_obj, fiveprime_cap_flag, hunter_strand)
# this is for same number of exon matches
#if num_group_reads < sj_hash_read_threshold or hunter_num_exons < 3:
# trans_comp_flag, start_match_list, start_diff_list, end_match_list, end_diff_list, short_trans, long_trans, min_exon_num, diff_num_exon_flag = compare_transcripts(hunter_trans_obj, prey_trans_obj, fiveprime_cap_flag, hunter_strand)
#else:
# exact_match_flag = exact_match_nocap(hunter_trans_obj, prey_trans_obj,hunter_strand)
#
# if exact_match_flag == "exact_match":
# trans_comp_flag = "same_three_prime_same_exons" # Use this for long-long nocap comparison
# elif hunter_num_exons == 1:
# trans_comp_flag = 'diff_transcripts'
# elif exact_match_flag == "not_exact":
# trans_comp_flag, start_match_list, start_diff_list, end_match_list, end_diff_list, short_trans, long_trans, min_exon_num, diff_num_exon_flag = compare_transcripts(hunter_trans_obj, prey_trans_obj, fiveprime_cap_flag, hunter_strand)
#
# else:
# print("Error with exact match output")
# sys.exit()
# print("compare_transcripts: "+hunter_trans_obj.cluster_id +"\t"+prey_trans_obj.cluster_id+"\t" +trans_comp_flag + "\t" + str(hunter_trans_obj.num_exons)+ "\t" + str(prey_trans_obj.num_exons) + "\t" + str(diff_num_exon_flag))
#For nocap only!!!!
trans_match_flag = 0
if trans_comp_flag == "same_transcript":
trans_match_flag = 1
elif trans_comp_flag == "same_three_prime_same_exons" :
trans_match_flag = 1
#elif trans_comp_flag == "same_three_prime_diff_exons":
# trans_match_flag = 1
a_group_check = transgroup.check_trans_status(hunter_cluster_id)
b_group_check = transgroup.check_trans_status(prey_cluster_id)
##########################################Affects all downstream code!
###For no cap!!!
# only merge groups if they have the same number of exons
# if diff num exons then only add shorter one to longer one
if trans_match_flag != 1: # skip if there is no match
continue
else: # if they are in different groups, but match
if hunter_num_exons == prey_num_exons: # same number of exons
if trans_comp_flag == "same_transcript": ###################### 2019/06/07
#print(transgroup.group_trans_dict)
transgroup.merge_a_b_groups_nocap(hunter_cluster_id,prey_cluster_id)
elif trans_comp_flag == "same_three_prime_same_exons" : ###################### 2019/06/07
transgroup.add_a_to_b_group(prey_cluster_id,hunter_cluster_id)
else:
print("Error with match flag")
print(trans_comp_flag)
sys.exit()
#print(transgroup.group_trans_dict)
# elif hunter_num_exons > prey_num_exons: #add shorter to longer
# transgroup.add_a_to_b_group(prey_cluster_id,hunter_cluster_id)
# elif prey_num_exons > hunter_num_exons: # add shorter to longer
# transgroup.add_a_to_b_group(hunter_cluster_id,prey_cluster_id)
# remove the prey cluster from dict to avoid redundant searching
# ungrouped_trans_obj_dict.pop(prey_cluster_id) # remove this outside of for loop
# add prey to unsearched dict
unsearched_trans_obj_dict[prey_cluster_id] = prey_trans_obj
# remove grouped prey from ungrouped dict
#for unsearched_cluster_id in unsearched_trans_obj_dict: #2019_06_07
# if unsearched_cluster_id in ungrouped_trans_obj_dict: #2019_06_07
# ungrouped_trans_obj_dict.pop(unsearched_cluster_id) #2019_06_07
# search at same exon num level end
###############################################################################
################################################################################
# search at lower exon num level
prey_exon_index = exon_num_index
#this_sub_exon_cluster_dict.pop(exon_num_list[prey_exon_index])
while prey_exon_index < len(exon_num_list):
prey_exon_index += 1
if prey_exon_index >= len(exon_num_list):
continue
prey_exon_level = exon_num_list[prey_exon_index]
#initialize ungrouped for this exon num level
subgrouped_trans_obj_dict = {}
#for subgroup_cluster_id in exon_trans_obj_dict[prey_exon_level]:
for subgroup_cluster_id in this_sub_exon_cluster_dict[prey_exon_level]:
subgrouped_trans_obj_dict[subgroup_cluster_id] = exon_trans_obj_dict[prey_exon_level][subgroup_cluster_id]
for prey_cluster_id in subgrouped_trans_obj_dict:
prey_trans_obj = subgrouped_trans_obj_dict[prey_cluster_id]
prey_strand = prey_trans_obj.strand
prey_num_exons = prey_trans_obj.num_exons
# this condition should not happen anymore because i delete hunters after I use them
if hunter_cluster_id == prey_cluster_id:
continue
# check strand match
if hunter_strand != prey_strand:
print("Strand of transcripts within gene do not match")
sys.exit()
b_group_check = transgroup.check_trans_status(prey_cluster_id)
# make groups for each transcript if no group
if b_group_check != 1:
transgroup.new_group_a(prey_cluster_id)
group_match_flag = transgroup.check_same_group(hunter_cluster_id, prey_cluster_id)
# this shoudn't be needed anymore due to new dict based group search
if group_match_flag == 1: # if they are both in the same group
continue
trans_comp_flag, start_match_list, start_diff_list, end_match_list, end_diff_list, short_trans, long_trans, min_exon_num, diff_num_exon_flag = compare_transcripts(hunter_trans_obj, prey_trans_obj, fiveprime_cap_flag, hunter_strand)
#For nocap only!!!!
trans_match_flag = 0
if trans_comp_flag == "same_transcript":
#trans_match_flag = 1
print("Error with subgroup seach same_transcript")
sys.exit()
elif trans_comp_flag == "same_three_prime_same_exons" :
#trans_match_flag = 1
print("Error with subgroup seach same_three_prime_same_exons")
sys.exit()
elif trans_comp_flag == "same_three_prime_diff_exons":
trans_match_flag = 1
a_group_check = transgroup.check_trans_status(hunter_cluster_id)
b_group_check = transgroup.check_trans_status(prey_cluster_id)
##########################################Affects all downstream code!
###For no cap!!!
# only merge groups if they have the same number of exons
# if diff num exons then only add shorter one to longer one
if trans_match_flag != 1: # skip if there is no match
continue
else: # if they are in different groups, but match
if hunter_num_exons == prey_num_exons: # same number of exons
#transgroup.merge_a_b_groups_nocap(hunter_cluster_id,prey_cluster_id)
print("Error with subgroup exon match: equal")
sys.exit()
elif hunter_num_exons > prey_num_exons: #add shorter to longer
transgroup.add_a_to_b_group(prey_cluster_id,hunter_cluster_id)
sub_length_cluster_dict[prey_cluster_id] = 1
#this_sub_exon_cluster_dict[prey_exon_level].pop(prey_cluster_id) ######### 2019/06/06
elif prey_num_exons > hunter_num_exons: # add shorter to longer
#transgroup.add_a_to_b_group(hunter_cluster_id,prey_cluster_id)
print("Error with subgroup exon match: hunter smaller")
sys.exit()
# reset hunter id
hunter_cluster_id = "new_hunter"
unsearched_count = len(unsearched_trans_obj_dict)
ungrouped_count = len(ungrouped_trans_obj_dict)
exon_num_index += 1
################################################################################
############################################################################################################
# End of new cluster grouping algorithm
trans_group_dict = transgroup.trans_group_dict
group_trans_dict = transgroup.group_trans_dict
return trans_group_dict,group_trans_dict
####################################################################################################
####################################################################################################
####################################################################################################
def reverse_complement(seq_list):
    """Return the reverse complement of a nucleotide sequence.

    seq_list: sequence (list or string) of single-character, upper-case bases.
    Returns a list of complemented bases in reverse order.
    IUPAC ambiguity codes other than A/T/G/C are all collapsed to "N".
    Raises KeyError for any character outside the supported base set.
    """
    # Canonical Watson-Crick complements; every supported ambiguity
    # code maps to plain "N".
    comp_dict = {"A": "T", "T": "A", "G": "C", "C": "G"}
    for ambig_base in "NRYKMSWBDHV":
        comp_dict[ambig_base] = "N"
    # Walk the sequence back-to-front, complementing as we go.
    return [comp_dict[base] for base in seq_list[::-1]]
def detect_polya(trans_obj, a_window):
    """Scan the genome immediately 3' of a transcript end for poly-A runs.

    A genomic poly-A stretch downstream of the mapped 3' end suggests
    internal priming rather than a genuine poly-A tail.

    trans_obj: transcript with strand / scaff_name / start_pos / end_pos
               / trans_id attributes.
    a_window:  number of downstream bases to inspect.
    Returns (downstream_seq, dseq_length, a_count, n_count, a_percent,
    n_percent) where the percentages are fractions of the window.
    Exits the program on unknown strand. Relies on module-level
    fasta_dict (scaffold -> list of bases) and log_flag.
    """
    strand = trans_obj.strand
    scaffold = trans_obj.scaff_name
    if strand == "+":
        trans_end = trans_obj.end_pos
        downstream_seq = fasta_dict[scaffold][trans_end:trans_end + a_window]
    elif strand == "-":
        trans_end = trans_obj.start_pos
        a_window_start = trans_end - a_window
        if a_window_start < 0:
            # Transcript ends near the scaffold edge; clamp window at 0.
            if log_flag == "log_on":
                print("Window start less than 0")
                print(trans_obj.trans_id)
                print(scaffold + " " + str(trans_obj.start_pos) + " " + str(trans_obj.end_pos) + " " + strand)
            a_window_start = 0
        # On the minus strand "downstream" is upstream in genome
        # coordinates; reverse-complement so we can still count A's.
        downstream_seq = reverse_complement(fasta_dict[scaffold][a_window_start:trans_end])
    else:
        print("Error with strand information for poly a detection")
        sys.exit()
    dseq_length = len(downstream_seq)
    if dseq_length == 0:
        if log_flag == "log_on":
            print("dseq_length == 0")
            print(trans_obj.trans_id)
            print(scaffold + " " + str(trans_obj.start_pos) + " " + str(trans_obj.end_pos) + " " + strand)
        dseq_length = 1  # avoid division by zero; counts stay 0 so percents are 0.0
    a_count = downstream_seq.count("A")
    n_count = downstream_seq.count("N")
    a_percent = float(a_count) / float(dseq_length)
    n_percent = float(n_count) / float(dseq_length)
    return downstream_seq, dseq_length, a_count, n_count, a_percent, n_percent
def detect_rt_switch(trans_obj):
    """Flag splice junctions that may be reverse-transcription switch artefacts.

    For each intron, takes rt_window bases of genome sequence just after
    the donor exon end and just before the acceptor exon start, and looks
    for a shared bind_length k-mer between the acceptor flank and the
    reverse complement of the donor flank (complementary structure that
    can cause RT template switching).

    trans_obj: object with strand / scaff_name / exon_start_list /
               exon_end_list attributes.
    Returns (num_junct_bind, bind_seq_dict) where bind_seq_dict maps
    junction index -> {1: donor flank, 2: acceptor flank,
    3: rev-comp donor flank}.
    Relies on module-level fasta_dict (scaffold -> list of bases).
    """
    rt_window = 20   # bases of flank to inspect on each side of the intron
    bind_length = 8  # k-mer length used to model complementary binding
    scaffold = trans_obj.scaff_name
    this_exon_starts = trans_obj.exon_start_list
    this_exon_ends = trans_obj.exon_end_list
    num_junct_bind = 0
    bind_seq_dict = {}  # bind_seq_dict[junction index][1/2/3] = seq
    for i in xrange(len(this_exon_starts) - 1):
        bind_flag = 0
        # Genomic coordinates flanking intron i: end of exon i (donor)
        # and start of exon i+1 (acceptor). BUG FIX: the previous code
        # used the loop indices themselves (i, i+1) as genome positions,
        # leaving the exon coordinate lists fetched but unused.
        start_index = this_exon_starts[i + 1]
        end_index = this_exon_ends[i]
        end_seq = fasta_dict[scaffold][end_index:end_index + rt_window]
        start_seq = fasta_dict[scaffold][start_index - rt_window:start_index]
        rev_comp_end_seq = reverse_complement(end_seq)
        # Collect all acceptor-flank k-mers, then test donor rev-comp k-mers.
        # NOTE(review): xrange(rt_window - bind_length) skips the final
        # k-mer position (would need +1); kept as-is to preserve behavior.
        binding_dict = {}  # binding_dict[k-mer string] = 1
        for j in xrange(rt_window - bind_length):
            bind_seq = start_seq[j:j + bind_length]
            bind_seq_string = "".join(bind_seq)
            binding_dict[bind_seq_string] = 1
        for j in xrange(rt_window - bind_length):
            bind_seq = rev_comp_end_seq[j:j + bind_length]
            bind_seq_string = "".join(bind_seq)
            if bind_seq_string in binding_dict:
                bind_flag += 1
        if bind_flag > 0:
            num_junct_bind += 1
            if i not in bind_seq_dict:
                bind_seq_dict[i] = {}
            bind_seq_dict[i][1] = end_seq
            bind_seq_dict[i][2] = start_seq
            bind_seq_dict[i][3] = rev_comp_end_seq
    return num_junct_bind, bind_seq_dict
####################################################################################################
def compare_multimaps(trans_obj_a, trans_obj_b):
    """Choose the better of two mappings (multi-maps) for the same read.

    A mapping "passes" when both its percent_cov and percent_identity
    exceed the module-level coverage_threshold and identity_threshold.
    A passing mapping beats a failing one; between two passing mappings
    the higher coverage wins, ties going to A; if neither passes, A is
    kept by default (both would be discarded downstream anyway).

    Returns (best_trans_obj, best_map_id) with best_map_id "A" or "B".
    """
    a_passes = (trans_obj_a.percent_cov > coverage_threshold
                and trans_obj_a.percent_identity > identity_threshold)
    b_passes = (trans_obj_b.percent_cov > coverage_threshold
                and trans_obj_b.percent_identity > identity_threshold)
    if a_passes and b_passes:
        # Both acceptable: keep the mapping with the larger coverage.
        if trans_obj_a.percent_cov >= trans_obj_b.percent_cov:
            return trans_obj_a, "A"
        return trans_obj_b, "B"
    if b_passes:
        # Only B clears the thresholds.
        return trans_obj_b, "B"
    # Only A passes, or neither does (default to A in that case).
    return trans_obj_a, "A"
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################### Loop through fasta file
# ---------------------------------------------------------------------------
# Reference-genome lookup tables, built once and used by all downstream
# coordinate -> sequence operations (poly-A scan, RT-switch scan).
# ---------------------------------------------------------------------------
fasta_dict = {} # fasta_dict[scaffold name] = list of single-char bases (upper case)
fasta_header_dict = {} # fasta_header_dict[scaffold name] = full fasta header/description
fasta_scaffold_list = [] # list of fasta seq names to be compared to SAM file header
prev_time = track_time(start_time,prev_time)
#Create fasta lookup dict
print("going through fasta")
for seq_record in SeqIO.parse(fasta_file_name, "fasta"):
    seq_name = str(seq_record.id)
    seq_desc = str(seq_record.description)
    seq_string = str(seq_record.seq)
    # normalise case so base comparisons ("A", "N") work everywhere
    seq_string = seq_string.upper()
    seq_length = len(seq_string)
    # stored as a list so slices are cheap and mutable for reverse_complement
    fasta_dict[seq_name] = list(seq_string)
    fasta_header_dict[seq_name] = seq_desc
    fasta_scaffold_list.append(seq_name)
# Meanings of the SAM bitwise FLAG values this pipeline recognises; any
# record whose flag is not a key here will KeyError in the record loop.
sam_flag_dict = {} #sam_flag_dict[flag number] = meaning
sam_flag_dict[0] = "forward_strand"
sam_flag_dict[4] = "unmapped"
sam_flag_dict[16] = "reverse_strand"
sam_flag_dict[2048] = "chimeric"
sam_flag_dict[2064] = "chimeric"
sam_flag_dict[256] = "not_primary"
sam_flag_dict[272] = "not_primary"
unmapped_dict = {} # unmapped_dict[cluster id] = 1
sam_scaffold_list = []
sam_scaffold_dict = {} # sam_scaffold_dict[seq name] = seq length
####################################################################################################
########################################################################### loop through sam file
trans_obj_dict = {} # trans_obj_dict[cluster id] = trans obj
group_trans_list_dict = {} # group_trans_list_dict[group id] = list of trans
trans_group_dict = {} # trans_group_dict[trans id] = group id
# Running state for grouping reads by overlapping genomic span.
this_scaffold = "none"
group_start_pos = 0
group_end_pos = 0
group_count = 0
scaffold_list = []
sam_count = 0
prev_time = track_time(start_time,prev_time)
#################################################################################################### SAM or BAM
print("going through sam file")
if bam_flag == "BAM":
    # Stream the BAM through "samtools view"; requires samtools on PATH.
    from subprocess import Popen, PIPE
    sam_file_contents = []
    samtools_path = "samtools"
    pline = [samtools_path, 'view', sam_file]
    try:
        p = Popen(pline, bufsize=-1, stdout=PIPE, stderr=PIPE)
    except OSError:
        raise OSError('Samtools not found!\n')
    # NOTE(review): communicate() buffers the whole decompressed BAM text
    # in memory at once — fine for modest files, heavy for large ones.
    sam_file_list = p.communicate()
    sam_file_contents = sam_file_list[0].split("\n")
    print(len(sam_file_contents))
elif bam_flag == "SAM":
    # Plain SAM: slurp the whole file into a list of lines.
    sam_file_obj = open(sam_file)
    sam_file_contents = sam_file_obj.read().rstrip("\n").split("\n")
##########################
############################################################################################################
############################################################################################################
############################################################################################################
# original mode start ##############################################
if run_mode_flag == "original":
for line in sam_file_contents:
#if sam_count == 0:
# print(line)
line_split = line.split("\t")
# if line_split[0] == "@SQ":
# seq_name = line_split[1].split(":")[1]
# seq_length = line_split[2].split(":")[1]
#
# sam_scaffold_dict[seq_name] = seq_length
# sam_scaffold_list.append(seq_name)
#
if line.startswith("@"):
continue
if line == "":
continue
sam_count += 1
if sam_count % 5000 == 0:
print("sam count " + str(sam_count))
read_id = line_split[0]
sam_flag = int(line_split[1])
scaff_name = line_split[2]
start_pos = int(line_split[3])
cigar = line_split[5]
read_seq = line_split[9]
seq_list = list(read_seq)
mapped_flag = sam_flag_dict[sam_flag]
####################################
#Check sam and gmap strand info!!!
#get strand information from gmap flag
xs_flag = "na"
for field in line_split:
if "XS:A:" in field:
xs_flag = field.split(":")[-1]
if mapped_flag == "forward_strand" and xs_flag == "-":
outline_strand = "\t".join([read_id,scaff_name,str(start_pos),cigar,"+-"])
outfile_strand.write(outline_strand)
outfile_strand.write("\n")
elif mapped_flag == "reverse_strand" and xs_flag == "+":
outline_strand = "\t".join([read_id,scaff_name,str(start_pos),cigar,"-+"])
outfile_strand.write(outline_strand)
outfile_strand.write("\n")
#
# Above: Check sam and gmap strand info!!!
####################################
if mapped_flag == "unmapped" or mapped_flag == "not_primary" or mapped_flag == "chimeric" :
unmapped_dict[read_id] = 1
accept_flag = mapped_flag # added this 2019/03/04
percent_coverage = "NA"
percent_identity = "NA"
error_line = "NA"
quality_percent = "NA"
length = "NA"
strand = "NA"
cigar = "NA"
cluster_line = "\t".join([read_id,mapped_flag,accept_flag,percent_coverage,percent_identity,error_line, length, cigar])
outfile_cluster.write(cluster_line)
outfile_cluster.write("\n")
continue
map_seq_length = mapped_seq_length(cigar)
[end_pos,exon_start_list,exon_end_list] = trans_coordinates(start_pos,cigar)
[h_count,s_count,i_count,d_count,mis_count,nomatch_dict,sj_pre_error_list,sj_post_error_list] = calc_error_rate(start_pos,cigar,seq_list,scaff_name,read_id)
trans_obj = Transcript(read_id)
trans_obj.add_sam_info(sam_flag,scaff_name,start_pos,cigar,read_seq,seq_list)
trans_obj.add_map_seq_length(map_seq_length)
trans_obj.add_exon_coords(end_pos,exon_start_list,exon_end_list)
trans_obj.add_mismatch(h_count,s_count,i_count,d_count,mis_count,nomatch_dict,sj_pre_error_list,sj_post_error_list)
##### 2020/07/27 sj hash
#trans_obj.make_sj_hash_string()
##### 2020/07/27 sj hash
percent_coverage = trans_obj.calc_coverage()
percent_identity = trans_obj.calc_identity()
percent_coverage_str = str(round(percent_coverage,2))
percent_identity_str = str(round(percent_identity,2))
error_line = trans_obj.make_error_line()
seq_length = trans_obj.seq_length
strand = trans_obj.strand
multimap_flag = 0
if percent_coverage < coverage_threshold or percent_identity < identity_threshold:
accept_flag = "discarded"
cluster_line = "\t".join([read_id,mapped_flag,accept_flag,percent_coverage_str,percent_identity_str,error_line, str(seq_length), cigar])
outfile_cluster.write(cluster_line)
outfile_cluster.write("\n")
#skip the transcript because the mapping is poor
continue
else:
bad_sj_flag,bad_sj_num_list,bad_sj_num_pre_list,bad_sj_num_post_list,bad_sj_error_count_list,lde_outline = sj_error_local_density(trans_obj)
outfile_lde.write(lde_outline)
outfile_lde.write("\n")
if bad_sj_flag > 0:
#sadfsdfs
#bad_sj_num_str_list =convert_int_list_to_string(bad_sj_num_list)
#bad_sj_num_line = ",".join(bad_sj_num_str_list)
#bad_sj_error_count_line = ",".join(bad_sj_error_count_list)
#lde_file_line = "\t".join([trans_obj.cluster_id,trans_obj.scaff_name,str(trans_obj.start_pos),str(trans_obj.end_pos),trans_obj.strand,bad_sj_num_line,bad_sj_error_count_line,trans_obj.cigar])
#outfile_lde.write(lde_outline)
#outfile_lde.write("\n")
#######################################
#add to cluster file
accept_flag = "local_density_error"
cluster_line = "\t".join([read_id,mapped_flag,accept_flag,percent_coverage_str,percent_identity_str,error_line, str(seq_length), cigar])
outfile_cluster.write(cluster_line)
outfile_cluster.write("\n")
continue
accept_flag = "accepted"
cluster_line = "\t".join([read_id,mapped_flag,accept_flag,percent_coverage_str,percent_identity_str,error_line, str(seq_length), cigar])
outfile_cluster.write(cluster_line)
outfile_cluster.write("\n")
#only run poly detetcion on accepted transcripts
downstream_seq,dseq_length,a_count,n_count,a_percent,n_percent = detect_polya(trans_obj,a_window)
trans_obj.add_polya_info(downstream_seq,dseq_length,a_count,n_count,a_percent,n_percent)
#check for multi maps
if read_id in trans_obj_dict:
print("Read has multi map")
print(line)
print(percent_coverage)
print(percent_identity)
trans_obj_a = trans_obj_dict[read_id]
trans_obj_b = trans_obj
best_trans_obj,best_map_id = compare_multimaps(trans_obj_a,trans_obj_b)
#only re-assign if the new map is better, otherwise old map is aready processed
if best_map_id == "B":
trans_obj_dict[read_id] = best_trans_obj
multimap_flag = 1
else:
# if this new map is not going to be used we can skip the rest of the loop
continue
else:
trans_obj_dict[read_id] = trans_obj
#check if a read has multi mapped!
# remove old read info if new map is better
if multimap_flag == 1:
old_group_count = trans_group_dict[read_id]
#check that the old group is not only made up of this read mapping
if len(group_trans_list_dict[old_group_count]) > 1:
group_trans_list_dict[old_group_count].remove(read_id) # remove read from old group
trans_group_dict.pop(read_id, None) # remove read from trans_group_dict, will be re-assigned later
elif len(group_trans_list_dict[old_group_count]) == 1: # group is only made of this mapping
group_trans_list_dict.pop(old_group_count, None) # remove group
trans_group_dict.pop(read_id, None) # remove read from trans_group_dict, will be re-assigned later
else:
print("Error with dealing with multimap management")
print("Warning: temrinated early!")
sys.exit()
#if read_id in trans_group_dict:
# print("cluster multi mapped!")
# print(line)
# print(percent_coverage)
# print(percent_identity)
# print(trans_obj.h_count)
# sys.exit()
#group trans by start and end coords
if this_scaffold == "none":
this_scaffold = scaff_name
group_start_pos = start_pos
group_end_pos = end_pos
group_trans_list_dict[group_count] = []
group_trans_list_dict[group_count].append(read_id)
trans_group_dict[read_id] = group_count
scaffold_list.append(this_scaffold)
continue
if scaff_name == this_scaffold:
if start_pos >= group_start_pos and start_pos <= group_end_pos: #add to group
group_trans_list_dict[group_count].append(read_id)
trans_group_dict[read_id] = group_count
#update group end position
if end_pos > group_end_pos:
group_end_pos = end_pos
elif start_pos > group_end_pos: #start new group
group_count += 1
group_start_pos = start_pos
group_end_pos = end_pos
group_trans_list_dict[group_count] = []
group_trans_list_dict[group_count].append(read_id)
trans_group_dict[read_id] = group_count
elif start_pos < group_start_pos: #check if sam sorted
print("Sam file not sorted!")
print(read_id)
sys.exit()
else: #start new group
this_scaffold = scaff_name
group_start_pos = start_pos
group_end_pos = end_pos
group_count += 1
group_trans_list_dict[group_count] = []
group_trans_list_dict[group_count].append(read_id)
trans_group_dict[read_id] = group_count
scaffold_list.append(this_scaffold)
#check read id add
if group_trans_list_dict[group_count][-1] != read_id:
print("cluster not added to group_trans_list_dict")
print(str(group_count) + " " + read_id)
sys.exit()
if bam_flag == "SAM":
sam_file_obj.close()
total_group_count = group_count
####################################################################################################
########################################################################### loop through groups
merged_obj_dict = {} # merged_obj_dict[final trans id] = merged obj
gene_count = 0
trans_check_count = 0 ##########################################################################debugging
prev_time = track_time(start_time,prev_time)
print("going through groups: " + str(total_group_count))
if len(list(group_trans_list_dict.keys())) == 0:
# if total_group_count == 0:
print("Error, no groups found!")
sys.exit()
multimap_missing_group_flag = 0
for i in xrange(total_group_count+1):
if i not in group_trans_list_dict:
print("Missing group num, check for multi-maps in SAM file")
print("This should only occur if you have a multi-map site that no reads are preferring.")
multimap_missing_group_flag = 1
continue
trans_list = group_trans_list_dict[i]
forward_trans_list = []
reverse_trans_list = []
first_trans_id = trans_list[0]
#separate into forward and reverse
for trans_id in trans_list:
if trans_check_count % 1000 == 0:
print(trans_check_count)
trans_check_count += 1
trans_obj = trans_obj_dict[trans_id]
if trans_obj.strand == "+":
forward_trans_list.append(trans_id)
elif trans_obj.strand == "-":
reverse_trans_list.append(trans_id)
################################################# For variation coverage
scaffold = trans_obj.scaff_name
if scaffold not in var_coverage_dict:
print("scaffold not in var_coverage_dict")
print(scaffold)
#sys.exit()
continue
this_exon_start_list = trans_obj.exon_start_list
this_exon_end_list = trans_obj.exon_end_list
for exon_index in xrange(len(this_exon_start_list)):
this_exon_start = this_exon_start_list[exon_index]
this_exon_end = this_exon_end_list[exon_index]
for this_coord in range(this_exon_start,this_exon_end):
if this_coord in var_coverage_dict[scaffold]:
var_coverage_dict[scaffold][this_coord][trans_id] = 1
if trans_id == "":
print("Issue with trans id in var cov dict")
print(trans_list)
sys.exit()
forward_gene_start_trans_dict,forward_start_gene_list = gene_group(forward_trans_list)
reverse_gene_start_trans_dict,reverse_start_gene_list = gene_group(reverse_trans_list)
all_start_gene_dict = {} # all_start_gene_dict[start] = 1
#collect all starts forward and reverse strands
for gene_start in forward_start_gene_list:
all_start_gene_dict[gene_start] = 1
for gene_start in reverse_gene_start_trans_dict:
all_start_gene_dict[gene_start] = 1
all_start_list = all_start_gene_dict.keys()
all_start_list.sort()
for gene_start in all_start_list:
gene_trans_obj_list = [] #list of trans obj lists
#if a forward and reverse gene start at the same place use this to make the the forward strand gene is represented first
if gene_start in forward_gene_start_trans_dict:
trans_id_list = forward_gene_start_trans_dict[gene_start].keys()
trans_obj_list = []
for trans_id in trans_id_list:
trans_obj_list.append(trans_obj_dict[trans_id])
gene_trans_obj_list.append(trans_obj_list)
if gene_start in reverse_gene_start_trans_dict:
trans_id_list = reverse_gene_start_trans_dict[gene_start].keys()
trans_obj_list = []
for trans_id in trans_id_list:
trans_obj_list.append(trans_obj_dict[trans_id])
gene_trans_obj_list.append(trans_obj_list)
#loop through list of trans obj lists, usually only one list since unlikely for forward and reverse strand genes to coincide
for trans_obj_list in gene_trans_obj_list:
gene_count += 1
#group transcripts by collapsability
if fiveprime_cap_flag == "capped":
match_trans_group_dict,match_group_trans_dict = simplify_gene_capped(trans_obj_list,fiveprime_cap_flag)
elif fiveprime_cap_flag == "no_cap":
match_trans_group_dict,match_group_trans_dict = simplify_gene_nocap(trans_obj_list,fiveprime_cap_flag)
else:
print("Error with cap flag " + fiveprime_cap_flag)
sys.exit()
merge_obj_list = []
tmp_count = 0
for match_group_num in match_group_trans_dict:
tmp_count += 1
tmp_trans_id = "G" + str(gene_count) + ".tmp." + str(tmp_count)
merged_obj = Merged(tmp_trans_id)
match_trans_id_list = match_group_trans_dict[match_group_num].keys()
match_trans_obj_list = []
for match_trans_id in match_trans_id_list:
match_trans_obj = trans_obj_dict[match_trans_id]
match_trans_obj_list.append(match_trans_obj)
merged_obj.add_merged_trans(match_trans_obj)
redundant_trans_flag = 0
if len(match_trans_obj_list) > 1: #if there are redundant transcripts, collapse
redundant_trans_flag = 1
collapse_start_list,collapse_end_list,start_wobble_list,end_wobble_list,collapse_sj_start_err_list,collapse_sj_end_err_list,collapse_start_error_nuc_list,collapse_end_error_nuc_list = collapse_transcripts(match_trans_obj_list,fiveprime_cap_flag,collapse_flag)
merged_obj.add_merge_info(collapse_start_list,collapse_end_list,start_wobble_list,end_wobble_list,collapse_sj_start_err_list,collapse_sj_end_err_list,collapse_start_error_nuc_list,collapse_end_error_nuc_list )
else: # if only one transcript
exon_start_list = match_trans_obj_list[0].exon_start_list
exon_end_list = match_trans_obj_list[0].exon_end_list
start_wobble_list = [0] * len(exon_start_list)
end_wobble_list = [0] * len(exon_start_list)
exon_start_list.sort()
exon_end_list.sort()
collapse_sj_start_err_list = []
collapse_sj_end_err_list = []
solo_trans_obj = match_trans_obj_list[0]
max_exon_num = len(exon_start_list)
collapse_start_error_nuc_list = []
collapse_end_error_nuc_list = []
for exon_index in xrange(len(exon_start_list)): # go from 3 prime end
e_start_priority, e_end_priority, e_start_priority_error,e_end_priority_error = sj_error_priority_finder(solo_trans_obj, exon_index, max_exon_num) ####################################
collapse_sj_start_err_list.append(e_start_priority)
collapse_sj_end_err_list.append(e_end_priority)
collapse_start_error_nuc_list.append(e_start_priority_error)
collapse_end_error_nuc_list.append(e_end_priority_error)
#collapse_sj_start_err_list = trans_obj.sj_pre_error_list
#collapse_sj_end_err_list = trans_obj.sj_post_error_list
merged_obj.add_merge_info(exon_start_list,exon_end_list,start_wobble_list,end_wobble_list,collapse_sj_start_err_list,collapse_sj_end_err_list,collapse_start_error_nuc_list,collapse_end_error_nuc_list )
merge_obj_list.append(merged_obj)
sorted_merge_obj_list = sort_transcripts(merge_obj_list)
trans_count = 0
for merged_obj in sorted_merge_obj_list:
trans_count += 1
final_trans_id = "G" + str(gene_count) + "." + str(trans_count)
merged_obj.trans_id = final_trans_id
print(final_trans_id)
merged_obj_dict[final_trans_id] = merged_obj
#write out to bed file
bed_line = merged_obj.format_bed_line()
outfile_bed.write(bed_line)
outfile_bed.write("\n")
#write out to transcript report file
trans_report_line = merged_obj.format_trans_report_line()
outfile_trans_report.write(trans_report_line)
outfile_trans_report.write("\n")
#write out to rt switch file
num_junct_bind,bind_seq_dict = detect_rt_switch(merged_obj)
if num_junct_bind > 0 :
for junct_num in bind_seq_dict:
first_seq = "".join(bind_seq_dict[junct_num][1])
second_seq = "".join(bind_seq_dict[junct_num][2])
rec_comp_seq = "".join(bind_seq_dict[junct_num][3])
rtswitch_line = "\t".join([final_trans_id,str(junct_num),first_seq,second_seq,rec_comp_seq])
#outfile_rtswitch.write(rtswitch_line)
#outfile_rtswitch.write("\n")
#write out to transcript cluster report file
for merged_trans_id in merged_obj.merged_trans_dict:
merged_trans_obj = merged_obj.merged_trans_dict[merged_trans_id]
cluster_id = merged_trans_obj.cluster_id
scaff_name = merged_trans_obj.scaff_name
strand = merged_trans_obj.strand
start_pos = merged_trans_obj.start_pos
end_pos = merged_trans_obj.end_pos
exon_start_line,exon_end_line = merged_trans_obj.make_exon_start_end_lines()
merge_trans_bed_line = merged_trans_obj.format_bed_line(final_trans_id)
#trans_clust_list = []
#trans_clust_list.append(final_trans_id)
#trans_clust_list.append(cluster_id)
#trans_clust_list.append(scaff_name)
#trans_clust_list.append(strand)
#trans_clust_list.append(str(start_pos))
#trans_clust_list.append(str(end_pos))
#trans_clust_list.append(exon_start_line)
#trans_clust_list.append(exon_end_line)
#trans_clust_line = "\t".join(trans_clust_list)
outfile_trans_clust_report.write(merge_trans_bed_line)
outfile_trans_clust_report.write("\n")
#write out to polya file
downstream_seq = "".join(merged_trans_obj.downstream_seq)
dseq_length = merged_trans_obj.dseq_length
a_count = merged_trans_obj.a_count
a_percent = merged_trans_obj.a_percent * 100
if a_percent > a_perc_thresh:
a_percent_string =str(round(a_percent,2))
polya_file_line = "\t".join([cluster_id,final_trans_id,strand,a_percent_string,str(a_count),downstream_seq])
outfile_polya.write(polya_file_line)
outfile_polya.write("\n")
# original mode end ##############################################
############################################################################################################
############################################################################################################
############################################################################################################
############################################################################################################
############################################################################################################
# no multimap mode start (low mem) ##############################################
##################################################################################################
##################################################################################################
# variation functions for low mem mode
def process_variation(scaffold,variation_dict,var_coverage_dict,var_support_threshold):
    """Write supported variants (and their coverage groups) for one scaffold.

    For every recorded position on `scaffold`, each variant type/allele whose
    supporting-read count reaches `var_support_threshold` is written to the
    module-global `outfile_variant`. Positions with at least one accepted
    variant are grouped by their covering-read set, and each group is written
    to `outfile_varcov` (low-mem mode counterpart of the original-mode writer).

    NOTE(review): unlike the original-mode writer, this version does not emit
    a reference allele column and does no bounds check against the scaffold
    length — confirm the downstream parser expects the shorter row format.
    """
    cov_group_var_dict = {}  # cov_group_var_dict[cov group line][position line] = 1
    cov_group_var_list = []  # insertion order of coverage-group keys
    # Variant types examined, in fixed output order:
    # hard clip, soft clip, mismatch, insertion, deletion.
    var_type_list = ["H", "S", "M", "I", "D"]
    if scaffold not in variation_dict:
        print("error with scaffold match in loci_variation")
        print(variation_dict.keys())
        sys.exit()
    position_list = sorted(variation_dict[scaffold].keys())
    for var_pos in position_list:
        var_cov_trans_id_list = sorted(var_coverage_dict[scaffold][var_pos].keys())
        var_coverage = len(var_cov_trans_id_list)
        var_pos_accept_flag = 0  # set to 1 if any variant at this position has passed the threshold
        for var_type in var_type_list:
            if var_type not in variation_dict[scaffold][var_pos]:
                continue
            for alt_seq in variation_dict[scaffold][var_pos][var_type]:
                read_list = list(variation_dict[scaffold][var_pos][var_type][alt_seq].keys())
                var_support_count = len(read_list)
                if var_support_count < var_support_threshold:
                    continue
                var_pos_accept_flag = 1
                # Columns: scaffold position type alt_allele count cov_count cluster_list
                var_outlist = [scaffold,
                               str(var_pos),
                               var_type,
                               alt_seq,
                               str(var_support_count),
                               str(var_coverage),
                               ",".join(read_list)]
                outfile_variant.write("\t".join(var_outlist))
                outfile_variant.write("\n")
        # Update variant coverage groups: positions sharing the exact same
        # covering-read set are collected under one key.
        if var_pos_accept_flag == 1:
            var_cov_trans_line = ",".join(var_cov_trans_id_list)
            position_line = "_".join([scaffold, str(var_pos)])
            if var_cov_trans_line not in cov_group_var_dict:
                cov_group_var_dict[var_cov_trans_line] = {}
                cov_group_var_list.append(var_cov_trans_line)
            cov_group_var_dict[var_cov_trans_line][position_line] = 1
    ################################################################################# write to var coverage file
    for cov_line in cov_group_var_list:
        position_list = sorted(cov_group_var_dict[cov_line].keys())
        all_pos_line = ",".join(position_list)
        varcov_file_line = "\t".join([all_pos_line, cov_line])
        outfile_varcov.write(varcov_file_line)
        outfile_varcov.write("\n")
    # Drop local references so the (potentially large) dicts can be collected
    # as soon as the caller also releases them.
    del variation_dict
    del var_coverage_dict
##################################################################################################
##################################################################################################
def process_loci(this_trans_obj_dict,trans_list,this_gene_count):
    """Collapse one locus of mapped reads into merged transcript models (low-mem mode).

    Splits the reads by strand, groups them into genes with gene_group(),
    collapses redundant transcripts per gene, and writes the results to the
    module-global output files (bed, trans report, trans cluster report, polyA).
    Returns the updated gene counter so the caller can keep numbering genes.
    """
    merged_obj_dict = {} # merged_obj_dict[final trans id] = merged obj
    forward_trans_list = []
    reverse_trans_list = []
    first_trans_id = trans_list[0]
    #separate into forward and reverse
    for trans_id in trans_list:
        #if trans_check_count % 1000 == 0:
        #    print(trans_check_count)
        #trans_check_count += 1
        trans_obj = this_trans_obj_dict[trans_id]
        if trans_obj.strand == "+":
            forward_trans_list.append(trans_id)
        elif trans_obj.strand == "-":
            reverse_trans_list.append(trans_id)
    forward_gene_start_trans_dict,forward_start_gene_list = gene_group(forward_trans_list)
    reverse_gene_start_trans_dict,reverse_start_gene_list = gene_group(reverse_trans_list)
    all_start_gene_dict = {} # all_start_gene_dict[start] = 1
    #collect all starts forward and reverse strands
    for gene_start in forward_start_gene_list:
        all_start_gene_dict[gene_start] = 1
    for gene_start in reverse_gene_start_trans_dict:
        all_start_gene_dict[gene_start] = 1
    # NOTE(review): .keys() followed by .sort() is Python-2-only; under
    # Python 3 this would need list(...) first (the file uses xrange, so
    # Python 2 appears intended).
    all_start_list = all_start_gene_dict.keys()
    all_start_list.sort()
    for gene_start in all_start_list:
        gene_trans_obj_list = [] #list of trans obj lists
        #if a forward and reverse gene start at the same place use this to make the the forward strand gene is represented first
        if gene_start in forward_gene_start_trans_dict:
            trans_id_list = forward_gene_start_trans_dict[gene_start].keys()
            trans_obj_list = []
            for trans_id in trans_id_list:
                trans_obj_list.append(this_trans_obj_dict[trans_id])
            gene_trans_obj_list.append(trans_obj_list)
        if gene_start in reverse_gene_start_trans_dict:
            trans_id_list = reverse_gene_start_trans_dict[gene_start].keys()
            trans_obj_list = []
            for trans_id in trans_id_list:
                trans_obj_list.append(this_trans_obj_dict[trans_id])
            gene_trans_obj_list.append(trans_obj_list)
        #loop through list of trans obj lists, usually only one list since unlikely for forward and reverse strand genes to coincide
        for trans_obj_list in gene_trans_obj_list:
            this_gene_count += 1
            #group transcripts by collapsability
            if fiveprime_cap_flag == "capped":
                match_trans_group_dict,match_group_trans_dict = simplify_gene_capped(trans_obj_list,fiveprime_cap_flag)
            elif fiveprime_cap_flag == "no_cap":
                match_trans_group_dict,match_group_trans_dict = simplify_gene_nocap(trans_obj_list,fiveprime_cap_flag)
            else:
                print("Error with cap flag " + fiveprime_cap_flag)
                sys.exit()
            merge_obj_list = []
            tmp_count = 0
            # Build one Merged object per collapse group; temporary IDs are
            # replaced with final IDs after sorting below.
            for match_group_num in match_group_trans_dict:
                tmp_count += 1
                tmp_trans_id = "G" + str(this_gene_count) + ".tmp." + str(tmp_count)
                merged_obj = Merged(tmp_trans_id)
                match_trans_id_list = match_group_trans_dict[match_group_num].keys()
                match_trans_obj_list = []
                for match_trans_id in match_trans_id_list:
                    match_trans_obj = this_trans_obj_dict[match_trans_id]
                    match_trans_obj_list.append(match_trans_obj)
                    merged_obj.add_merged_trans(match_trans_obj)
                redundant_trans_flag = 0
                if len(match_trans_obj_list) > 1: #if there are redundant transcripts, collapse
                    redundant_trans_flag = 1
                    collapse_start_list,collapse_end_list,start_wobble_list,end_wobble_list,collapse_sj_start_err_list,collapse_sj_end_err_list,collapse_start_error_nuc_list,collapse_end_error_nuc_list = collapse_transcripts(match_trans_obj_list,fiveprime_cap_flag,collapse_flag)
                    merged_obj.add_merge_info(collapse_start_list,collapse_end_list,start_wobble_list,end_wobble_list,collapse_sj_start_err_list,collapse_sj_end_err_list,collapse_start_error_nuc_list,collapse_end_error_nuc_list )
                else: # if only one transcript
                    # Singleton group: exon coordinates are used as-is with
                    # zero wobble; per-junction error priorities still need
                    # to be computed.
                    exon_start_list = match_trans_obj_list[0].exon_start_list
                    exon_end_list = match_trans_obj_list[0].exon_end_list
                    start_wobble_list = [0] * len(exon_start_list)
                    end_wobble_list = [0] * len(exon_start_list)
                    exon_start_list.sort()
                    exon_end_list.sort()
                    collapse_sj_start_err_list = []
                    collapse_sj_end_err_list = []
                    solo_trans_obj = match_trans_obj_list[0]
                    max_exon_num = len(exon_start_list)
                    collapse_start_error_nuc_list = []
                    collapse_end_error_nuc_list = []
                    for exon_index in xrange(len(exon_start_list)): # go from 3 prime end
                        e_start_priority, e_end_priority, e_start_priority_error,e_end_priority_error = sj_error_priority_finder(solo_trans_obj, exon_index, max_exon_num) ####################################
                        collapse_sj_start_err_list.append(e_start_priority)
                        collapse_sj_end_err_list.append(e_end_priority)
                        collapse_start_error_nuc_list.append(e_start_priority_error)
                        collapse_end_error_nuc_list.append(e_end_priority_error)
                    #collapse_sj_start_err_list = trans_obj.sj_pre_error_list
                    #collapse_sj_end_err_list = trans_obj.sj_post_error_list
                    merged_obj.add_merge_info(exon_start_list,exon_end_list,start_wobble_list,end_wobble_list,collapse_sj_start_err_list,collapse_sj_end_err_list,collapse_start_error_nuc_list,collapse_end_error_nuc_list )
                merge_obj_list.append(merged_obj)
            sorted_merge_obj_list = sort_transcripts(merge_obj_list)
            trans_count = 0
            # Assign final IDs in sorted order and write all per-transcript
            # output files.
            for merged_obj in sorted_merge_obj_list:
                trans_count += 1
                final_trans_id = "G" + str(this_gene_count) + "." + str(trans_count)
                merged_obj.trans_id = final_trans_id
                print(final_trans_id)
                merged_obj_dict[final_trans_id] = merged_obj
                #write out to bed file
                bed_line = merged_obj.format_bed_line()
                outfile_bed.write(bed_line)
                outfile_bed.write("\n")
                #write out to transcript report file
                trans_report_line = merged_obj.format_trans_report_line()
                outfile_trans_report.write(trans_report_line)
                outfile_trans_report.write("\n")
                #write out to rt switch file
                num_junct_bind,bind_seq_dict = detect_rt_switch(merged_obj)
                if num_junct_bind > 0 :
                    # NOTE(review): the rtswitch line is built but the writes
                    # below are commented out, so nothing is emitted here.
                    for junct_num in bind_seq_dict:
                        first_seq = "".join(bind_seq_dict[junct_num][1])
                        second_seq = "".join(bind_seq_dict[junct_num][2])
                        rec_comp_seq = "".join(bind_seq_dict[junct_num][3])
                        rtswitch_line = "\t".join([final_trans_id,str(junct_num),first_seq,second_seq,rec_comp_seq])
                        #outfile_rtswitch.write(rtswitch_line)
                        #outfile_rtswitch.write("\n")
                #write out to transcript cluster report file
                for merged_trans_id in merged_obj.merged_trans_dict:
                    merged_trans_obj = merged_obj.merged_trans_dict[merged_trans_id]
                    cluster_id = merged_trans_obj.cluster_id
                    scaff_name = merged_trans_obj.scaff_name
                    strand = merged_trans_obj.strand
                    start_pos = merged_trans_obj.start_pos
                    end_pos = merged_trans_obj.end_pos
                    exon_start_line,exon_end_line = merged_trans_obj.make_exon_start_end_lines()
                    merge_trans_bed_line = merged_trans_obj.format_bed_line(final_trans_id)
                    #trans_clust_line = "\t".join(trans_clust_list)
                    outfile_trans_clust_report.write(merge_trans_bed_line)
                    outfile_trans_clust_report.write("\n")
                    #write out to polya file
                    downstream_seq = "".join(merged_trans_obj.downstream_seq)
                    dseq_length = merged_trans_obj.dseq_length
                    a_count = merged_trans_obj.a_count
                    a_percent = merged_trans_obj.a_percent * 100
                    if a_percent > a_perc_thresh:
                        a_percent_string =str(round(a_percent,2))
                        polya_file_line = "\t".join([cluster_id,final_trans_id,strand,a_percent_string,str(a_count),downstream_seq])
                        outfile_polya.write(polya_file_line)
                        outfile_polya.write("\n")
    #del this_trans_obj_dict
    #del trans_list
    return this_gene_count
############################################################################################################
############################################################################################################
# Low-memory mode: stream the SAM file once, keeping only the current read
# group in memory. A group is flushed through process_loci() whenever a new
# non-overlapping group (or a new scaffold) begins.
if run_mode_flag == "low_mem":
    gene_count = 0
    trans_obj_dict = {} # trans_obj_dict[cluster id] = trans obj
    group_trans_list = []
    for line in sam_file_contents:
        #if sam_count == 0:
        #    print(line)
        line_split = line.split("\t")
        if line.startswith("@"):
            continue
        if line == "":
            continue
        sam_count += 1
        if sam_count % 5000 == 0:
            print("sam count " + str(sam_count))
        # Standard SAM columns: QNAME, FLAG, RNAME, POS, CIGAR, SEQ.
        read_id = line_split[0]
        sam_flag = int(line_split[1])
        scaff_name = line_split[2]
        start_pos = int(line_split[3])
        cigar = line_split[5]
        read_seq = line_split[9]
        seq_list = list(read_seq)
        mapped_flag = sam_flag_dict[sam_flag]
        ####################################
        #Check sam and gmap strand info!!!
        #get strand information from gmap flag
        xs_flag = "na"
        for field in line_split:
            if "XS:A:" in field:
                xs_flag = field.split(":")[-1]
        # Log reads whose aligner XS strand disagrees with the SAM flag strand.
        if mapped_flag == "forward_strand" and xs_flag == "-":
            outline_strand = "\t".join([read_id,scaff_name,str(start_pos),cigar,"+-"])
            outfile_strand.write(outline_strand)
            outfile_strand.write("\n")
        elif mapped_flag == "reverse_strand" and xs_flag == "+":
            outline_strand = "\t".join([read_id,scaff_name,str(start_pos),cigar,"-+"])
            outfile_strand.write(outline_strand)
            outfile_strand.write("\n")
        #
        # Above: Check sam and gmap strand info!!!
        ####################################
        if mapped_flag == "unmapped" or mapped_flag == "not_primary" or mapped_flag == "chimeric" :
            unmapped_dict[read_id] = 1
            accept_flag = mapped_flag # added this 2019/03/04
            percent_coverage = "NA"
            percent_identity = "NA"
            error_line = "NA"
            quality_percent = "NA"
            length = "NA"
            strand = "NA"
            cigar = "NA"
            cluster_line = "\t".join([read_id,mapped_flag,accept_flag,percent_coverage,percent_identity,error_line, length, cigar])
            outfile_cluster.write(cluster_line)
            outfile_cluster.write("\n")
            continue
        # Build the Transcript object for this mapped read.
        map_seq_length = mapped_seq_length(cigar)
        [end_pos,exon_start_list,exon_end_list] = trans_coordinates(start_pos,cigar)
        [h_count,s_count,i_count,d_count,mis_count,nomatch_dict,sj_pre_error_list,sj_post_error_list] = calc_error_rate_lowmem(start_pos,cigar,seq_list,scaff_name,read_id)
        trans_obj = Transcript(read_id)
        trans_obj.add_sam_info(sam_flag,scaff_name,start_pos,cigar,read_seq,seq_list)
        trans_obj.add_map_seq_length(map_seq_length)
        trans_obj.add_exon_coords(end_pos,exon_start_list,exon_end_list)
        trans_obj.add_mismatch(h_count,s_count,i_count,d_count,mis_count,nomatch_dict,sj_pre_error_list,sj_post_error_list)
        ##### 2020/07/27 sj hash
        #trans_obj.make_sj_hash_string()
        ##### 2020/07/27 sj hash
        percent_coverage = trans_obj.calc_coverage()
        percent_identity = trans_obj.calc_identity()
        percent_coverage_str = str(round(percent_coverage,2))
        percent_identity_str = str(round(percent_identity,2))
        error_line = trans_obj.make_error_line()
        seq_length = trans_obj.seq_length
        strand = trans_obj.strand
        multimap_flag = 0
        if percent_coverage < coverage_threshold or percent_identity < identity_threshold:
            accept_flag = "discarded"
            cluster_line = "\t".join([read_id,mapped_flag,accept_flag,percent_coverage_str,percent_identity_str,error_line, str(seq_length), cigar])
            outfile_cluster.write(cluster_line)
            outfile_cluster.write("\n")
            #skip the transcript because the mapping is poor
            continue
        else:
            # Local-density-error filter: drop reads with clustered splice
            # junction errors.
            bad_sj_flag,bad_sj_num_list,bad_sj_num_pre_list,bad_sj_num_post_list,bad_sj_error_count_list,lde_outline = sj_error_local_density(trans_obj)
            outfile_lde.write(lde_outline)
            outfile_lde.write("\n")
            if bad_sj_flag > 0:
                accept_flag = "local_density_error"
                cluster_line = "\t".join([read_id,mapped_flag,accept_flag,percent_coverage_str,percent_identity_str,error_line, str(seq_length), cigar])
                outfile_cluster.write(cluster_line)
                outfile_cluster.write("\n")
                continue
        accept_flag = "accepted"
        cluster_line = "\t".join([read_id,mapped_flag,accept_flag,percent_coverage_str,percent_identity_str,error_line, str(seq_length), cigar])
        outfile_cluster.write(cluster_line)
        outfile_cluster.write("\n")
        #only run poly detection on accepted transcripts
        downstream_seq,dseq_length,a_count,n_count,a_percent,n_percent = detect_polya(trans_obj,a_window)
        trans_obj.add_polya_info(downstream_seq,dseq_length,a_count,n_count,a_percent,n_percent)
        trans_obj_dict[read_id] = trans_obj
        #group trans by start and end coords
        if this_scaffold == "none":
            # First accepted read of the run: open the first group.
            this_scaffold = scaff_name
            group_start_pos = start_pos
            group_end_pos = end_pos
            group_trans_list = []
            group_trans_list.append(read_id)
            scaffold_list.append(this_scaffold)
            continue
        if scaff_name == this_scaffold:
            if start_pos >= group_start_pos and start_pos <= group_end_pos: #add to group
                # this is so we can get variation information after confirming the read belongs to this group
                # 2020/07/31
                calc_variation(start_pos, cigar, seq_list, scaff_name, read_id)
                group_trans_list.append(read_id)
                #update group end position
                if end_pos > group_end_pos:
                    group_end_pos = end_pos
            elif start_pos > group_end_pos: #start new group #################################
                # clear new read id from new group
                trans_obj_dict.pop(read_id, None)
                gene_count = process_loci(trans_obj_dict,group_trans_list, gene_count) #################################### 2020/07/30
                #if variation_dict:
                #    if len(group_trans_list) >= var_support_threshold:
                #        process_variation(this_scaffold, variation_dict, var_coverage_dict,var_support_threshold)
                #    #refresh variation dict
                #    #del variation_dict
                #    #del var_coverage_dict
                #    variation_dict = {}
                #    var_coverage_dict = {}
                # add variation information from first read of new group
                # 2020/07/31
                #calc_variation(start_pos, cigar, seq_list, scaff_name, read_id)
                # NOTE(review): this recomputes the error rate for the first
                # read of the new group — presumably for its side effects on
                # the variation bookkeeping; confirm against calc_error_rate_lowmem.
                [h_count,s_count,i_count,d_count,mis_count,nomatch_dict,sj_pre_error_list,sj_post_error_list] = calc_error_rate_lowmem(start_pos,cigar,seq_list,scaff_name,read_id)
                group_count += 1
                group_start_pos = start_pos
                group_end_pos = end_pos
                #del trans_obj_dict
                trans_obj_dict = {} # trans_obj_dict[cluster id] = trans obj # refresh this dict to save memory
                trans_obj_dict[read_id] = trans_obj
                #del group_trans_list
                group_trans_list = [] # refresh this dict to save memory
                group_trans_list.append(read_id)
            elif start_pos < group_start_pos: #check if sam sorted
                print("Sam file not sorted!")
                print(read_id)
                sys.exit()
        else: #start new group #################################
            # New scaffold: flush the previous group, then open a fresh one.
            # clear new read id from new group
            trans_obj_dict.pop(read_id, None)
            gene_count = process_loci(trans_obj_dict, group_trans_list, gene_count) #################################### 2020/07/30
            #if variation_dict:
            #    if len(group_trans_list) >= var_support_threshold:
            #        process_variation(this_scaffold, variation_dict, var_coverage_dict,var_support_threshold)
            #
            #    #refresh variation dict
            #    #del variation_dict
            #    #del var_coverage_dict
            #    variation_dict = {}
            #    var_coverage_dict = {}
            # add variation information from first read of new group
            # 2020/07/31
            #calc_variation(start_pos, cigar, seq_list, scaff_name, read_id)
            [h_count,s_count,i_count,d_count,mis_count,nomatch_dict,sj_pre_error_list,sj_post_error_list] = calc_error_rate_lowmem(start_pos,cigar,seq_list,scaff_name,read_id)
            this_scaffold = scaff_name
            group_start_pos = start_pos
            group_end_pos = end_pos
            group_count += 1
            scaffold_list.append(this_scaffold)
            #del trans_obj_dict
            trans_obj_dict = {} # trans_obj_dict[cluster id] = trans obj # refresh this dict to save memory
            trans_obj_dict[read_id] = trans_obj
            #del group_trans_list
            group_trans_list = [] # refresh this dict to save memory
            group_trans_list.append(read_id)
    # make sure to handle the last group for the low mem mode
    # this is not an issue with the original mode because you collect groups as you go along.
    # but in low mem mode you only process a group when a new group is found.
    gene_count = process_loci(trans_obj_dict, group_trans_list, gene_count) #################################### 2020/07/30
    #if variation_dict:
    #    if len(group_trans_list) >= var_support_threshold:
    #        process_variation(this_scaffold, variation_dict, var_coverage_dict,var_support_threshold)
    #    #refresh variation dict
    #    #del variation_dict
    #    #del var_coverage_dict
    #    variation_dict = {}
    #    var_coverage_dict = {}
    if bam_flag == "SAM":
        sam_file_obj.close()
    total_group_count = group_count
    ####################################################################################################
    ########################################################################### loop through groups
    prev_time = track_time(start_time,prev_time)
# no multimap mode end (low mem) ##############################################
############################################################################################################
############################################################################################################
################################################################################# write to variation file
# Original mode: all variation information was accumulated globally during the
# main pass, so the variant and variant-coverage files are written here in one
# sweep (low-mem mode writes them incrementally via process_variation instead).
if run_mode_flag == "original":
    cov_group_var_dict = {} # cov_group_var_dict[cov group line][position line] = 1
    cov_group_var_list = []
    # Variant types: hard clip, soft clip, mismatch, insertion, deletion.
    var_type_list = []
    var_type_list.append("H")
    var_type_list.append("S")
    var_type_list.append("M")
    var_type_list.append("I")
    var_type_list.append("D")
    var_support_threshold = 5
    prev_time = track_time(start_time,prev_time)
    print("Writing variant file")
    for scaffold in scaffold_list:
        if scaffold not in variation_dict:
            continue
        position_list = []
        position_list = list(variation_dict[scaffold].keys())
        position_list.sort()
        for var_pos in position_list:
            var_cov_trans_id_list = list(var_coverage_dict[scaffold][var_pos].keys())
            var_cov_trans_id_list.sort()
            var_coverage = len(var_cov_trans_id_list)
            ########################################################################################2020/12/14
            # Guard against positions that fall outside the scaffold sequence.
            if var_pos > len(fasta_dict[scaffold]) or var_pos < 0:
                print("Read mapping off scaffold")
                print(scaffold +" : "+ str(var_pos))
                print(var_cov_trans_id_list)
                continue
            #    print(variation_dict[scaffold][var_pos])
            ########################################################################################
            ref_allele = fasta_dict[scaffold][var_pos]
            var_pos_accept_flag = 0 # Use this to signal if a variation has passed threshold for this position
            for var_type in var_type_list:
                if var_type not in variation_dict[scaffold][var_pos]:
                    continue
                # Reference allele only makes sense for mismatches.
                if var_type != "M":
                    ref_allele = "NA"
                for alt_seq in variation_dict[scaffold][var_pos][var_type]:
                    read_list = list(variation_dict[scaffold][var_pos][var_type][alt_seq].keys())
                    var_support_count = len(read_list)
                    if var_support_count >= var_support_threshold:
                        var_pos_accept_flag = 1
                        #scaffold position type ref_allele alt_allele count cov_count cluster_list
                        var_outlist = []
                        var_outlist.append(scaffold)
                        var_outlist.append(str(var_pos))
                        var_outlist.append(var_type)
                        var_outlist.append(ref_allele)
                        var_outlist.append(alt_seq)
                        var_outlist.append(str(var_support_count))
                        var_outlist.append(str(var_coverage))
                        read_line = ",".join(read_list)
                        var_outlist.append(read_line)
                        var_outline = "\t".join(var_outlist)
                        outfile_variant.write(var_outline)
                        outfile_variant.write("\n")
            #Update variant coverage
            if var_pos_accept_flag == 1:
                var_cov_trans_line = ",".join(var_cov_trans_id_list)
                position_line = "_".join([scaffold,str(var_pos)])
                if var_cov_trans_line not in cov_group_var_dict:
                    cov_group_var_dict[var_cov_trans_line] = {}
                    cov_group_var_list.append(var_cov_trans_line)
                cov_group_var_dict[var_cov_trans_line][position_line] = 1
    prev_time = track_time(start_time,prev_time)
    ################################################################################# write to var coverage file
    for cov_line in cov_group_var_list:
        position_list = list(cov_group_var_dict[cov_line].keys())
        position_list.sort()
        all_pos_line = ",".join(position_list)
        varcov_file_line = "\t".join([all_pos_line,cov_line])
        outfile_varcov.write(varcov_file_line)
        outfile_varcov.write("\n")
    prev_time = track_time(start_time,prev_time)
# Final sanity warning (original mode only): a missing group number indicates
# multi-mapped reads that no read preferred.
if run_mode_flag == "original":
    if multimap_missing_group_flag == 1:
        print("Missing group num, check for multi-maps in SAM file")
        print("This should only occur if you have a multi-map site that no reads are preferring.")
print("TAMA Collapse has successfully finished running!")
"pysam"
] | 33251fc7d1943bcad04149ab8fdafac84cdb0cfb0ab31c42ab62ec7639cfe8e6 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
# Authors: Christoph Klein
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import tempfile, os
import numpy as np
import mdtraj as md
from mdtraj.formats import XYZTrajectoryFile
from mdtraj.testing import eq
# Module-level scratch files shared by the tests below:
# a plain .xyz target and a gzipped .xyz.gz target.
fd, temp = tempfile.mkstemp(suffix='.xyz')
fd_gz, temp_gz = tempfile.mkstemp(suffix='.xyz.gz')
def teardown_module(module):
    """Remove the temporary files created by tests in this file.

    This gets automatically called by pytest. Cleans up BOTH module-level
    fixtures: the original version only removed ``temp``, leaking the open
    descriptor ``fd_gz`` and the on-disk file ``temp_gz``.
    """
    for descriptor, path in ((fd, temp), (fd_gz, temp_gz)):
        os.close(descriptor)
        os.unlink(path)
def test_read_0(get_fn):
    """Reading with stride=3 must equal slicing a full read with [::3]."""
    with XYZTrajectoryFile(get_fn('frame0.xyz')) as fh:
        full = fh.read()
    with XYZTrajectoryFile(get_fn('frame0.xyz')) as fh:
        strided = fh.read(stride=3)
    eq(full[::3], strided)
def test_read_1(get_fn):
    """xyz coordinates agree with the DCD reference to ~3 decimal places."""
    dcd_traj = md.load(get_fn('frame0.dcd'), top=get_fn('native.pdb'))
    xyz_traj = md.load(get_fn('frame0.xyz'), top=get_fn('native.pdb'))
    eq(dcd_traj.xyz[0], xyz_traj.xyz[0], decimal=3)
def test_read_gz(get_fn):
    """Gzipped xyz input loads to the same coordinates as the DCD reference."""
    dcd_traj = md.load(get_fn('frame0.dcd'), top=get_fn('native.pdb'))
    gz_traj = md.load(get_fn('frame0.xyz.gz'), top=get_fn('native.pdb'))
    eq(dcd_traj.xyz[0], gz_traj.xyz[0], decimal=3)
def test_read_write():
    """Random coordinates written to an xyz file round-trip unchanged."""
    coords = np.around(10 * np.random.randn(100, 11, 3), decimals=3)
    with XYZTrajectoryFile(temp, mode='w') as fh:
        fh.write(coords)
    with XYZTrajectoryFile(temp) as fh:
        roundtripped = fh.read()
    eq(coords, roundtripped)
def test_mdwrite(get_fn):
    """Trajectory.save can emit both plain and gzipped xyz files."""
    traj = md.load(get_fn('frame0.xyz'), top=get_fn('native.pdb'))
    for destination in (temp, temp_gz):
        traj.save(destination)
def test_multiread(get_fn):
    """Two consecutive one-frame reads return frames 0 and 1 in order."""
    reference = md.load(get_fn('frame0.xyz'), top=get_fn('native.pdb'))
    with XYZTrajectoryFile(get_fn('frame0.xyz')) as fh:
        frames = [fh.read(n_frames=1) for _ in range(2)]
    # File stores angstroms; Trajectory stores nanometers, hence /10.
    eq(reference.xyz[0], frames[0][0] / 10)
    eq(reference.xyz[1], frames[1][0] / 10)
def test_seek(get_fn):
    """seek()/tell() honor absolute and relative (whence=1) positioning."""
    reference = md.load(get_fn('frame0.xyz'), top=get_fn('native.pdb'))
    with XYZTrajectoryFile(get_fn('frame0.xyz')) as fh:
        # absolute seek to frame 1
        fh.seek(1)
        eq(1, fh.tell())
        eq(reference.xyz[1], fh.read(n_frames=1)[0] / 10)
        # absolute seek to frame 10; a read advances the cursor to 11
        fh.seek(10)
        eq(10, fh.tell())
        eq(reference.xyz[10], fh.read(n_frames=1)[0] / 10)
        eq(11, fh.tell())
        # relative seek backwards: 11 - 8 -> frame 3 (read moves to 4)
        fh.seek(-8, 1)
        eq(reference.xyz[3], fh.read(n_frames=1)[0] / 10)
        # relative seek forwards: 4 + 4 -> frame 8
        fh.seek(4, 1)
        eq(reference.xyz[8], fh.read(n_frames=1)[0] / 10)
def test_len(get_fn):
    """len() reports the frame count without advancing the read cursor."""
    with md.open(get_fn('frame0.xyz')) as fh:
        assert len(fh) == 501
        # computing the length must not move the cursor off frame 0
        assert fh._frame_index == 0
        assert len(fh.read()) == 501
| dwhswenson/mdtraj | tests/test_xyz.py | Python | lgpl-2.1 | 3,549 | [
"MDTraj"
] | 1990a44b4e23cac0a46679d3b990f676350e9e36d267ed865ae95939c49ce6b5 |
#!/usr/bin/env python
import os
import numpy as np
np.seterr(invalid='ignore', divide='ignore')
from decimal import *
from scipy import interpolate
from scipy.interpolate import InterpolatedUnivariateSpline
import string
import subprocess
import time
import multiprocessing
# Quantization templates for Decimal.quantize(): rounding a Decimal against
# one of these truncates it to the given number of decimal places.
TWOPLACES = Decimal(10) ** -2 # same as Decimal('0.01')
FOURPLACES = Decimal(10) ** -4 # same as Decimal('0.0001')
SIXPLACES = Decimal(10) ** -6 # same as Decimal('0.000001')
# Redshift used by the McGreer prior — presumably the McGreer et al. dark
# fraction constraint at z ~ 5.9; confirm against the prior implementation.
McGreer_Redshift = 5.9
# The redshift of the QSO
QSO_Redshift = 7.0842
class Likelihood21cmFast_multiz(object):
def __init__(self, Redshifts_For_LF,Muv_values, phi_values, phi_Error, k_values, PS_values, Error_k_values, PS_Error, Redshift, Redshifts_For_Prior, param_legend, Fiducial_Params, FlagOptions, param_string_names, NSplinePoints,
TsCalc_z, Foreground_cut, Shot_Noise_cut, IncludeLightCone, IncludeLF, ModUncert, PriorLegend, NFValsQSO, PDFValsQSO):
self.Redshifts_For_LF = Redshifts_For_LF # New in v1.4
self.Muv_values = Muv_values # New in v1.4
self.phi_values = phi_values # New in v1.4
self.phi_Error = phi_Error # New in v1.4
self.k_values = k_values
self.PS_values = PS_values
self.Error_k_values = Error_k_values
self.PS_Error = PS_Error
self.Redshift = Redshift
self.Redshifts_For_Prior = Redshifts_For_Prior
self.param_legend = param_legend
self.Fiducial_Params = Fiducial_Params
self.FlagOptions = FlagOptions
self.param_string_names = param_string_names
self.NSplinePoints = NSplinePoints
self.TsCalc_z = TsCalc_z
self.Foreground_cut = Foreground_cut
self.Shot_Noise_cut = Shot_Noise_cut
self.IncludeLightCone = IncludeLightCone
self.IncludeLF = IncludeLF
self.ModUncert = ModUncert
self.PriorLegend = PriorLegend
self.NFValsQSO = NFValsQSO
self.PDFValsQSO = PDFValsQSO
def Likelihood(self,ctx):
params = ctx.getParams()
# If the light-cone option is set, we do not return the neutral fraction as it can be a large amount of data (also less useful).
# Only really helpful (if at all) for co-eval cubes
if self.IncludeLightCone is True:
nf_vals = np.zeros(3)
else:
# If we are applying the optical depth prior, then we might as well keep the value of the electron scattering optical depth
if self.PriorLegend['PlanckPrior'] is True or self.FlagOptions['KEEP_ALL_DATA'] is True:
nf_vals = np.zeros(len(self.Redshift) + len(self.Redshifts_For_Prior)+3)
else:
nf_vals = np.zeros(len(self.Redshift) + len(self.Redshifts_For_Prior)+2)
# Generate a unique ID for each thread by sampling a randomly seeded distribution.
# Given than file I/O needs to be unique to each thread, it is beneficial to provide a unique ID in the off chance that two different threads
# end up with the same walker position (same parameter set)
np.random.seed()
random_number = np.random.normal(size=1)
# Create a second unique ID, that being the first variable of the specific walker (fail-safe against ID overlap; shouldn't happen, but guarding against anyway)
Individual_ID = Decimal(repr(random_number[0])).quantize(SIXPLACES)
Individual_ID_2 = Decimal(repr(params[0])).quantize(SIXPLACES)
# Add all the redshifts (those for the likelihood and those for prior only). This parameter is only used where this is relevant
number_redshifts = len(self.Redshift) + len(self.Redshifts_For_Prior)
# Add and sort all redshifts (those for the likelihood and those for prior only)
AllRedshifts = []
if self.IncludeLightCone is False:
for i in range(len(self.Redshift)):
AllRedshifts.append(self.Redshift[i])
for i in range(len(self.Redshifts_For_Prior)):
AllRedshifts.append(self.Redshifts_For_Prior[i])
AllRedshifts.sort(key=float)
StoredStatisticalData = []
StoredStatisticalData_Error = []
StoredFileLayout = []
StoredFileLayout_Error = []
separator_column = "\t"
if self.IncludeLightCone is True:
LightConeFlag = 1
else:
LightConeFlag = 0
separator = " "
separator_other = "_"
seq = []
# Add the random thread ID
seq.append("%s"%(Individual_ID))
# Add the second ID
seq.append("%s"%(Individual_ID_2))
StringArgument_other = string.join(seq,separator_other)
# Add number of redshifts
# If using the light-cone version of the code, don't need to set a redshift
if self.IncludeLightCone is True:
seq.append("0")
else:
seq.append("%s"%(number_redshifts))
# Add light cone flag
seq.append("%s"%(LightConeFlag))
# If mass-dependence on ionising efficiency is allowed. Add the flag here
if self.FlagOptions['USE_MASS_DEPENDENT_ZETA'] is True:
seq.append("1")
else:
seq.append("0")
# Add redshift for Ts.c calculation
seq.append("%s"%(self.TsCalc_z))
#StringArgument = string.join(seq,separator)
#print 'StringArgument:',StringArgument
#if self.IncludeLF is True:
if self.IncludeLF is 1:
seq.append("1")
elif self.IncludeLF is 2:
seq.append("2")
else:
seq.append("0")
StringArgument = string.join(seq,separator)
##### Now we need to create the individual walker file to be read by drive_21cmMC_streamlined #####
if self.FlagOptions['GENERATE_NEW_ICS'] is True:
GenerateNewICs = 1
else:
GenerateNewICs = 0
if self.FlagOptions['INCLUDE_RSDS'] is True:
Subcell_RSDs = 1
else:
Subcell_RSDs = 0
if self.FlagOptions['USE_IONISATION_FCOLL_TABLE'] is True:
IONISATION_FCOLL_TABLE = 1
else:
IONISATION_FCOLL_TABLE = 0
if self.FlagOptions['USE_FCOLL_TABLE'] is True:
UseFcollTable = 1
else:
UseFcollTable = 0
if self.FlagOptions['CALC_TS_FLUC'] is True:
PerformTsCalc = 1
else:
PerformTsCalc = 0
if self.FlagOptions['USE_INHOMO_RECO'] is True:
INHOMO_RECO = 1
else:
INHOMO_RECO = 0
if self.FlagOptions['KEEP_GLOBAL_DATA'] is True:
OutputGlobalAve = 1
else:
if self.PriorLegend['PlanckPrior'] is True or self.PriorLegend['McGreerPrior'] is True or self.PriorLegend['GreigPrior'] is True or self.FlagOptions['KEEP_ALL_DATA'] is True:
OutputGlobalAve = 1
elif self.IncludeLightCone is True:
OutputGlobalAve = 1
else:
OutputGlobalAve = 0
parameter_number = 0
create_file = open("Walker_%s.txt"%(StringArgument_other),"w")
create_file.write("FLAGS %s %s %s %s %s %s %s\n"%(GenerateNewICs,Subcell_RSDs,IONISATION_FCOLL_TABLE,UseFcollTable,PerformTsCalc,INHOMO_RECO,OutputGlobalAve))
# New in v1.4
if self.param_legend['F_STAR10'] is True:
create_file.write("F_STAR10 %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("F_STAR10 %s\n"%(self.Fiducial_Params['F_STAR10']))
if self.param_legend['ALPHA_STAR'] is True:
create_file.write("ALPHA_STAR %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("ALPHA_STAR %s\n"%(self.Fiducial_Params['ALPHA_STAR']))
if self.param_legend['F_ESC10'] is True:
create_file.write("F_ESC10 %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("F_ESC10 %s\n"%(self.Fiducial_Params['F_ESC10']))
if self.param_legend['ALPHA_ESC'] is True:
create_file.write("ALPHA_ESC %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("ALPHA_ESC %s\n"%(self.Fiducial_Params['ALPHA_ESC']))
if self.param_legend['M_TURN'] is True:
create_file.write("M_TURN %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("M_TURN %s\n"%(self.Fiducial_Params['M_TURN']))
if self.param_legend['t_STAR'] is True:
create_file.write("t_STAR %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("t_STAR %s\n"%(self.Fiducial_Params['t_STAR']))
if self.param_legend['ZETA'] is True:
create_file.write("ZETA %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("ZETA %s\n"%(self.Fiducial_Params['ZETA']))
if self.param_legend['MFP'] is True:
create_file.write("MFP %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("MFP %s\n"%(self.Fiducial_Params['MFP']))
if self.param_legend['TVIR_MIN'] is True:
create_file.write("TVIR_MIN %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
X_RAY_TVIR_MIN = params[parameter_number]
parameter_number += 1
else:
create_file.write("TVIR_MIN %s\n"%(self.Fiducial_Params['TVIR_MIN']))
if self.param_legend['L_X'] is True:
create_file.write("L_X %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("L_X %s\n"%(self.Fiducial_Params['L_X']))
if self.param_legend['NU_X_THRESH'] is True:
create_file.write("NU_X_THRESH %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("NU_X_THRESH %s\n"%(self.Fiducial_Params['NU_X_THRESH']))
create_file.write("NU_X_BAND_MAX %s\n"%(self.Fiducial_Params['NU_X_BAND_MAX']))
create_file.write("NU_X_MAX %s\n"%(self.Fiducial_Params['NU_X_MAX']))
if self.param_legend['X_RAY_SPEC_INDEX'] is True:
create_file.write("X_RAY_SPEC_INDEX %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("X_RAY_SPEC_INDEX %s\n"%(self.Fiducial_Params['X_RAY_SPEC_INDEX']))
if self.param_legend['TVIR_MIN'] is True:
create_file.write("X_RAY_TVIR_MIN %s\n"%(Decimal(repr(X_RAY_TVIR_MIN)).quantize(SIXPLACES)))
else:
create_file.write("X_RAY_TVIR_MIN %s\n"%(self.Fiducial_Params['X_RAY_TVIR_MIN']))
create_file.write("X_RAY_TVIR_LB %s\n"%(self.Fiducial_Params['X_RAY_TVIR_LB']))
create_file.write("X_RAY_TVIR_UB %s\n"%(self.Fiducial_Params['X_RAY_TVIR_UB']))
#create_file.write("F_STAR %s\n"%(self.Fiducial_Params['F_STAR']))
create_file.write("N_RSD_STEPS %s\n"%(self.Fiducial_Params['N_RSD_SUBCELLS']))
create_file.write("LOS_direction %s\n"%(self.Fiducial_Params['LOS_direction']))
if self.IncludeLightCone is False:
for i in range(number_redshifts):
create_file.write("CO-EVAL-Z %s\n"%(AllRedshifts[i]))
create_file.close()
if self.FlagOptions['GENERATE_NEW_ICS'] is True:
# A random number between 1 and 10^12 should be sufficient to randomise the ICs
RandomSeed = np.random.uniform(low=1,high=1e12,size=1)
# Now create the cosmology file associated with this walker.
create_file = open("WalkerCosmology_%s.txt"%(StringArgument_other),"w")
if self.FlagOptions['GENERATE_NEW_ICS'] is True:
create_file.write("RANDOM_SEED %s\n"%(RandomSeed[0]))
else:
create_file.write("RANDOM_SEED %s\n"%(Decimal(repr(1.0)).quantize(SIXPLACES)))
if self.param_legend['SIGMA_8'] is True:
create_file.write("SIGMA_8 %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("SIGMA_8 %s\n"%(self.Fiducial_Params['SIGMA_8']))
if self.param_legend['littleh'] is True:
create_file.write("hubble %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("hubble %s\n"%(self.Fiducial_Params['littleh']))
if self.param_legend['OMEGA_M'] is True:
create_file.write("Omega_M %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("Omega_M %s\n"%(self.Fiducial_Params['OMEGA_M']))
if self.param_legend['OMEGA_M'] is True:
create_file.write("Omega_L %s\n"%(Decimal(repr(1. - params[parameter_number-1])).quantize(SIXPLACES)))
else:
create_file.write("Omega_L %s\n"%(Decimal(repr(1. - float(self.Fiducial_Params['OMEGA_M']))).quantize(SIXPLACES)))
if self.param_legend['OMEGA_b'] is True:
create_file.write("Omega_b %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("Omega_b %s\n"%(self.Fiducial_Params['OMEGA_b']))
if self.param_legend['NS'] is True:
create_file.write("ns %s\n"%(Decimal(repr(params[parameter_number])).quantize(SIXPLACES)))
parameter_number += 1
else:
create_file.write("ns %s\n"%(self.Fiducial_Params['NS']))
create_file.close()
if self.FlagOptions['LOG_LINEAR_K_SAMPLING'] is True:
kSplineMin = np.log10(self.Foreground_cut)
kSplineMax = np.log10(self.Shot_Noise_cut)
else:
kSplineMin = self.Foreground_cut
kSplineMax = self.Shot_Noise_cut
kSpline = np.zeros(self.NSplinePoints)
for j in range(self.NSplinePoints):
kSpline[j] = kSplineMin + (kSplineMax - kSplineMin)*float(j)/(self.NSplinePoints - 1)
if self.FlagOptions['LOG_LINEAR_K_SAMPLING'] is True:
kSpline = 10**( kSpline )
counter = 0
command = "./drive_21cmMC_streamlined %s"%(StringArgument)
os.system(command)
total_sum = 0
if self.FlagOptions['KEEP_GLOBAL_DATA'] is True:
k_values_estimate = np.loadtxt('AveData_%s.txt'%(StringArgument_other), usecols=(0,))
PS_values_estimate = np.loadtxt('AveData_%s.txt'%(StringArgument_other), usecols=(2,))
if self.IncludeLightCone is False:
k_values_estimate = k_values_estimate[::-1]
PS_values_estimate = PS_values_estimate[::-1]
# Converting the redshifts to frequencies for the interpolation (must be in increasing order, it is by default redshift which is decreasing)
FrequencyValues_mock = np.zeros(len(self.k_values[0]))
FrequencyValues_model = np.zeros(len(k_values_estimate))
# Shouldn't need two, as they should be the same sampling. However, just done it for now
for j in range(len(self.k_values[0])):
FrequencyValues_mock[j] = ((2.99792e8)/(.2112*(1. + self.k_values[0][j])))/(1e6)
for j in range(len(k_values_estimate)):
FrequencyValues_model[j] = ((2.99792e8)/(.2112*(1. + k_values_estimate[j])))/(1e6)
splined_mock = interpolate.splrep(FrequencyValues_mock,self.PS_values[0],s=0)
splined_model = interpolate.splrep(FrequencyValues_model,PS_values_estimate,s=0)
FrequencyMin = self.Fiducial_Params['MIN_FREQ']
FrequencyMax = self.Fiducial_Params['MAX_FREQ']
if self.FlagOptions['USE_GS_FIXED_ERROR'] is True:
ErrorOnGlobal = self.Fiducial_Params['CONST_ERROR']
Bandwidth = self.Fiducial_Params['BANDWIDTH']
FrequencyBins = int(np.floor((FrequencyMax-FrequencyMin)/Bandwidth)) + 1
for j in range(FrequencyBins):
FrequencyVal = FrequencyMin + Bandwidth*j
MockPS_val = interpolate.splev(FrequencyVal,splined_mock,der=0)
ModelPS_val = interpolate.splev(FrequencyVal,splined_model,der=0)
total_sum += np.square( (MockPS_val - ModelPS_val)/ErrorOnGlobal )
else:
for j in range(len(self.Error_k_values[0])):
FrequencyVal = ((2.99792e8)/(.2112*(1. + self.Error_k_values[0][j])))/(1e6)
if FrequencyVal >= FrequencyMin and FrequencyVal <= FrequencyMax:
MockPS_val = interpolate.splev(FrequencyVal,splined_mock,der=0)
ModelPS_val = interpolate.splev(FrequencyVal,splined_model,der=0)
total_sum += np.square( (MockPS_val - ModelPS_val)/self.PS_Error[0][j] )
# New in v1.4
#if self.IncludeLF is True:
if self.IncludeLF:
# At the moment I just put the redshift list by hand, but this part should be modified.
#NUM_OF_REDSHIFTS_FOR_LF = 4
for iz in range(len(self.Redshifts_For_LF)):
# Exclude bright-end (Muv < -20) from Lumnosity function
Muv_i = []
phi_i = []
error_i = []
j = 0
while j < len(self.Muv_values[iz]):
if self.Muv_values[iz][j] > -20. and self.Muv_values[iz][j]!=0.:
Muv_i.append(self.Muv_values[iz][j])
phi_i.append(self.phi_values[iz][j])
error_i.append(self.phi_Error[iz][j])
j = j + 1
Muv_values_estimate0 = np.loadtxt('LF_estimate_%s_%s.txt'%(StringArgument_other,self.Redshifts_For_LF[iz]), usecols=(0,))
log10phi_values_estimate0 = np.loadtxt('LF_estimate_%s_%s.txt'%(StringArgument_other,self.Redshifts_For_LF[iz]), usecols=(1,))
Muv_values_estimate = Muv_values_estimate0[::-1]
log10phi_values_estimate = log10phi_values_estimate0[::-1]
LF_criterion = 1 #LF_criteion == 0: skip this chain.
# check whether Muv does not increase monotonically with halo mass. if not interpolation is not possible.
i_check = 0
while i_check < len(Muv_values_estimate)-1:
if (Muv_values_estimate[i_check] > Muv_values_estimate[i_check+1]):
LF_criterion = 0
#print ("Found Muv list reversed\n")
break
i_check = i_check + 1
if (max(Muv_values_estimate) <= min(self.Muv_values[iz])) or (min(Muv_values_estimate) >= max(self.Muv_values[iz])):
LF_criterion = 0
if (LF_criterion == 0):
total_sum = total_sum + 10000000000.
else:
LFestimate_Spline = interpolate.splrep(Muv_values_estimate, log10phi_values_estimate,s=0)
for ii in range(len(Muv_i)):
Muv_i_val = Muv_i[ii]
log10phi_i_val = interpolate.splev(Muv_i_val,LFestimate_Spline,der=0)
#total_sum = total_sum + np.square(phi_i[ii] - 10**(log10phi_i_val)) / (np.square(error_i[ii]))
chi2_i = np.square(phi_i[ii] - 10**(log10phi_i_val)) / (np.square(error_i[ii]))
if (np.isinf(chi2_i)):
chi2_i = 100000.
total_sum = total_sum + chi2_i
else:
if self.IncludeLightCone is True:
# For the light-cone version, the c-code creates a single textfile containing the filenames of each of the light-cone 21cm PS generated. This
# should be of equal or greater length than the number of mock observations added.
LightconePSFilename = 'delTps_lightcone_filenames_%s.txt'%(StringArgument_other)
filename = open('%s'%(LightconePSFilename), 'r')
LightconePS = [line.rstrip('\n') for line in filename]
#nf_vals[0] = 'Walker_%s.txt'%(StringArgument_other)
nf_vals[0] = 0.#'Walker_%s.txt'%(StringArgument_other)
for i in range(len(self.Redshift)):
k_values_estimate = np.loadtxt('%s'%(LightconePS[i]), usecols=(0,))
PS_values_estimate = np.loadtxt('%s'%(LightconePS[i]), usecols=(1,))
Poisson_error_estimate = np.loadtxt('%s'%(LightconePS[i]), usecols=(2,)) # Read possion errors
if self.FlagOptions['KEEP_ALL_DATA'] is True:
if not self.IncludeLF is 2:
if i == 0:
StoredStatisticalData.append(k_values_estimate)
StoredFileLayout.append("{%i}"%(i))
StoredStatisticalData_Error.append(k_values_estimate)
StoredFileLayout_Error.append("{%i}"%(i))
StoredStatisticalData.append(PS_values_estimate)
StoredFileLayout.append("{%i}"%(i+1))
StoredStatisticalData_Error.append(Poisson_error_estimate)
StoredFileLayout_Error.append("{%i}"%(i+1))
else:
for i in range(len(AllRedshifts)):
# Read in the neutral fraction and 21cm PS for this parameter set and redshift
nf_value = np.loadtxt('NeutralFraction_%s_%s.txt'%(StringArgument_other,AllRedshifts[i]), usecols=(0,))
nf_vals[i] = nf_value
# This only reading the data in from file, and then saving it to output
# Yes, I end up reading twice, but whatever...
# (I split it in the case that Redshifts_for_Prior was non-zero)
if not self.IncludeLF is 2:
k_values_estimate = np.loadtxt('delTps_estimate_%s_%s.txt'%(StringArgument_other,AllRedshifts[i]), usecols=(0,))
PS_values_estimate = np.loadtxt('delTps_estimate_%s_%s.txt'%(StringArgument_other,AllRedshifts[i]), usecols=(1,))
Poisson_error_estimate = np.loadtxt('delTps_estimate_%s_%s.txt'%(StringArgument_other,AllRedshifts[i]), usecols=(2,))
if self.FlagOptions['KEEP_ALL_DATA'] is True:
if i == 0:
StoredStatisticalData.append(k_values_estimate)
StoredFileLayout.append("{%i}"%(i))
StoredStatisticalData_Error.append(k_values_estimate)
StoredFileLayout_Error.append("{%i}"%(i))
StoredStatisticalData.append(PS_values_estimate)
StoredFileLayout.append("{%i}"%(i+1))
StoredStatisticalData_Error.append(Poisson_error_estimate)
StoredFileLayout_Error.append("{%i}"%(i+1))
# nf_vals[len(AllRedshifts)] = 'Walker_%s.txt'%(StringArgument_other)
nf_vals[len(AllRedshifts)] = '%s'%(Individual_ID)
nf_vals[len(AllRedshifts)+1] = '%s'%(Individual_ID_2)
# Note here that the usage of len(Redshift) uses the number of mock lightcone 21cm PS if IncludeLightCone was set to True.
for i in range(len(self.Redshift)):
if self.IncludeLightCone is True:
k_values_estimate = np.loadtxt('%s'%(LightconePS[i]), usecols=(0,))
PS_values_estimate = np.loadtxt('%s'%(LightconePS[i]), usecols=(1,))
Poisson_error_estimate = np.loadtxt('%s'%(LightconePS[i]), usecols=(2,)) # Read possion errors
elif not self.IncludeLF is 2:
# Read in the neutral fraction and 21cm PS for this parameter set and redshift
k_values_estimate = np.loadtxt('delTps_estimate_%s_%s.txt'%(StringArgument_other,self.Redshift[i]), usecols=(0,))
PS_values_estimate = np.loadtxt('delTps_estimate_%s_%s.txt'%(StringArgument_other,self.Redshift[i]), usecols=(1,))
Poisson_error_estimate = np.loadtxt('delTps_estimate_%s_%s.txt'%(StringArgument_other,self.Redshift[i]), usecols=(2,))
if not self.IncludeLF is 2:
splined_mock = interpolate.splrep(self.k_values[i],np.log10(self.PS_values[i]),s=0)
splined_error = interpolate.splrep(self.Error_k_values[i],np.log10(self.PS_Error[i]),s=0)
splined_model = interpolate.splrep(k_values_estimate,np.log10(PS_values_estimate),s=0)
splined_model_poisson_err = interpolate.splrep(k_values_estimate,np.log10(Poisson_error_estimate),s=0)
# Interpolating the mock and error PS in log space
for j in range(self.NSplinePoints):
MockPS_val = 10**(interpolate.splev(kSpline[j],splined_mock,der=0))
ErrorPS_val = 10**(interpolate.splev(kSpline[j],splined_error,der=0))
ModelPS_val = 10**(interpolate.splev(kSpline[j],splined_model,der=0))
ModelPE_val = 10**(interpolate.splev(kSpline[j],splined_model_poisson_err,der=0))
# Check if there are any nan values for the 21cm PS
# A nan value implies a IGM neutral fraction of zero, that is, reionisation has completed and thus no 21cm signal
# Set the value of the 21cm PS to zero. Which results in the largest available difference (i.e. if you expect a signal
# (i.e. non zero mock 21cm PS) but have no signal from the sampled model, then want a large difference for the
# chi-squared likelihood).
if np.isnan(ModelPS_val) == True:
ModelPS_val = 0.0
if np.isnan(ModelPE_val) == True:
ModelPE_val = 0.0
if np.isnan(MockPS_val) == True:
MockPS_val = 0.0
#total_sum += np.square((MockPS_val - ModelPS_val)/(np.sqrt(ErrorPS_val**2. + (self.ModUncert*ModelPS_val)**2.)))
total_sum += np.square((MockPS_val - ModelPS_val)/(np.sqrt(ErrorPS_val**2. + (self.ModUncert*ModelPS_val)**2. + ModelPE_val**2)))
# New in v1.4
#if self.IncludeLF is True:
if self.IncludeLF:
# At the moment I just put the redshift list by hand, but this part should be modified.
#NUM_OF_REDSHIFTS_FOR_LF = 4
for iz in range(len(self.Redshifts_For_LF)):
# Exclude bright-end (Muv < -20) from Lumnosity function
Muv_i = []
phi_i = []
error_i = []
j = 0
while j < len(self.Muv_values[iz]):
if self.Muv_values[iz][j] > -20. and self.Muv_values[iz][j]!=0.:
Muv_i.append(self.Muv_values[iz][j])
phi_i.append(self.phi_values[iz][j])
error_i.append(self.phi_Error[iz][j])
j = j + 1
Muv_values_estimate0 = np.loadtxt('LF_estimate_%s_%s.txt'%(StringArgument_other,self.Redshifts_For_LF[iz]), usecols=(0,))
log10phi_values_estimate0 = np.loadtxt('LF_estimate_%s_%s.txt'%(StringArgument_other,self.Redshifts_For_LF[iz]), usecols=(1,))
Muv_values_estimate = Muv_values_estimate0[::-1]
log10phi_values_estimate = log10phi_values_estimate0[::-1]
LF_criterion = 1 #LF_criteion == 0: skip this chain.
# check whether Muv does not increase monotonically with halo mass. if not interpolation is not possible.
i_check = 0
while i_check < len(Muv_values_estimate)-1:
if (Muv_values_estimate[i_check] > Muv_values_estimate[i_check+1]):
LF_criterion = 0
#print ("Found Muv list reversed\n")
break
i_check = i_check + 1
if (max(Muv_values_estimate) <= min(self.Muv_values[iz])) or (min(Muv_values_estimate) >= max(self.Muv_values[iz])):
LF_criterion = 0
if (LF_criterion == 0):
total_sum = total_sum + 10000000000.
else:
LFestimate_Spline = interpolate.splrep(Muv_values_estimate, log10phi_values_estimate,s=0)
for ii in range(len(Muv_i)):
Muv_i_val = Muv_i[ii]
log10phi_i_val = interpolate.splev(Muv_i_val,LFestimate_Spline,der=0)
#total_sum = total_sum + np.square(phi_i[ii] - 10**(log10phi_i_val)) / (np.square(error_i[ii]))
chi2_i = np.square(phi_i[ii] - 10**(log10phi_i_val)) / (np.square(error_i[ii]))
if (np.isinf(chi2_i)):
chi2_i = 100000.
total_sum = total_sum + chi2_i
if self.FlagOptions['KEEP_ALL_DATA'] is True:
StoredFileLayout = string.join(StoredFileLayout,separator_column)
StoredFileLayout_Error = string.join(StoredFileLayout_Error,separator_column)
with open('%s/StatisticalData/TotalPSData_%s.txt'%(self.FlagOptions['KEEP_ALL_DATA_FILENAME'],StringArgument_other),'w') as f:
for x in zip(*StoredStatisticalData):
f.write("%s\n"%(StoredFileLayout).format(*x))
with open('%s/StatisticalData_Error/TotalPS_ErrorData_%s.txt'%(self.FlagOptions['KEEP_ALL_DATA_FILENAME'],StringArgument_other),'w') as f:
for x in zip(*StoredStatisticalData_Error):
f.write("%s\n"%(StoredFileLayout_Error).format(*x))
f.close()
if (self.PriorLegend['PlanckPrior'] is True and number_redshifts > 2) or self.PriorLegend['McGreerPrior'] is True or self.PriorLegend['GreigPrior'] is True or self.FlagOptions['KEEP_ALL_DATA'] is True:
z_Hist = np.loadtxt('AveData_%s.txt'%(StringArgument_other), usecols=(0,))
xH_Hist = np.loadtxt('AveData_%s.txt'%(StringArgument_other), usecols=(1,))
# When the light-cone version is set, the values are writted in decreasing order, not increasing order
# Therefore, reverse to be in increasing order (the interpolation/extrapolation is required to be in increasing order)
if self.IncludeLightCone is True:
if z_Hist[0] > z_Hist[-1]:
z_Hist = z_Hist[::-1]
xH_Hist = xH_Hist[::-1]
if (self.FlagOptions['KEEP_ALL_DATA'] is True or self.PriorLegend['PlanckPrior'] is True) and number_redshifts > 2:
# Mean and one sigma errors for the Planck constraints
# The Planck prior is modelled as a Gaussian: tau = 0.058 \pm 0.012 (https://arxiv.org/abs/1605.03507)
PlanckTau_Mean = 0.058
PlanckTau_OneSigma = 0.012
# Simple linear extrapolation of the redshift range provided by the user, to be able to estimate the optical depth
nZinterp = 15
# The minimum of the extrapolation is chosen to 5.9, to correspond to the McGreer et al. prior on the IGM neutral fraction.
# The maximum is chosed to be z = 18., which is arbitrary.
ZExtrap_min = 5.9
ZExtrap_max = 20.0
ZExtrapVals = np.zeros(nZinterp)
XHI_ExtrapVals = np.zeros(nZinterp)
# Perform only a linear interpolation/extrapolation
order = 1
# The linear interpolation/extrapolation function, taking as input the redshifts supplied by the user and the corresponding neutral fractions
# recovered for the specific EoR parameter set
LinearInterpolationFunction = InterpolatedUnivariateSpline(z_Hist, xH_Hist, k=order)
for i in range(nZinterp):
ZExtrapVals[i] = ZExtrap_min + (ZExtrap_max - ZExtrap_min)*float(i)/(nZinterp - 1)
XHI_ExtrapVals[i] = LinearInterpolationFunction(ZExtrapVals[i])
# Ensure that the neutral fraction does not exceed unity, or go negative
if XHI_ExtrapVals[i] > 1.0:
XHI_ExtrapVals[i] = 1.0
if XHI_ExtrapVals[i] < 0.0:
XHI_ExtrapVals[i] = 0.0
# Set up the arguments for calculating the estimate of the optical depth. Once again, performed using command line code.
separator_Planck = " "
seq_Planck = []
for i in range(nZinterp):
seq_Planck.append("%s"%(ZExtrapVals[i]))
seq_Planck.append("%s"%(XHI_ExtrapVals[i]))
StringArgument_Planck = string.join(seq_Planck,separator_Planck)
# Perform the computation of tau
command = './ComputingTau_e %s %s %s'%(Individual_ID,Decimal(repr(params[0])).quantize(SIXPLACES),StringArgument_Planck)
os.system(command)
# Read tau from file
tau_value = np.loadtxt('Tau_e_%s_%s.txt'%(Individual_ID,Decimal(repr(params[0])).quantize(SIXPLACES)), usecols=(0,))
# remove the temporary files
if self.FlagOptions['KEEP_ALL_DATA'] is True:
command = "mv Tau_e_%s_%s.txt %s/TauData/"%(Individual_ID,Decimal(repr(params[0])).quantize(SIXPLACES),self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
else:
command = "rm Tau_e_%s_%s.txt"%(Individual_ID,Decimal(repr(params[0])).quantize(SIXPLACES))
os.system(command)
# As the likelihood is computed in log space, the addition of the prior is added linearly to the existing chi^2 likelihood
if self.PriorLegend['PlanckPrior'] is True:
total_sum = total_sum + np.square( ( PlanckTau_Mean - tau_value )/(PlanckTau_OneSigma) )
#if self.IncludeLightCone is True:
# nf_vals[1] = tau_value
#else:
# # it is len(AllRedshifts) as the indexing begins at zero
# nf_vals[len(AllRedshifts)+2] = tau_value
nf_vals[len(AllRedshifts)+2] = tau_value
if self.PriorLegend['McGreerPrior'] is True:
# Mean and one sigma errors for the McGreer et al. constraints
# Modelled as a flat, unity prior at x_HI <= 0.06, and a one sided Gaussian at x_HI > 0.06 ( Gaussian of mean 0.06 and one sigma of 0.05 )
McGreer_Mean = 0.06
McGreer_OneSigma = 0.05
if McGreer_Redshift in z_Hist:
for i in range(len(z_Hist)):
if z_Hist[i] == McGreer_Redshift:
McGreer_NF = xH_Hist[i]
if McGreer_NF > 1.:
McGreer_NF = 1.
if McGreer_NF < 0.:
McGreer_NF = 0.
# As the likelihood is computed in log space, the addition of the prior is added linearly to the existing chi^2 likelihood
if McGreer_NF <= 0.06:
total_sum = total_sum + 0.0 # Add zero, as we assume flat (unity) probability at x_HI <= 0.06 (as it is a lower limit)
else:
total_sum = total_sum + np.square( ( McGreer_Mean - McGreer_NF )/(McGreer_OneSigma) )
elif number_redshifts > 2:
# Perform only a linear interpolation/extrapolation
order = 1
# The linear interpolation/extrapolation function, taking as input the redshifts supplied by the user and the corresponding neutral fractions
# recovered for the specific EoR parameter set
LinearInterpolationFunction = InterpolatedUnivariateSpline(z_Hist, xH_Hist, k=order)
McGreer_NF = LinearInterpolationFunction(McGreer_Redshift)
if McGreer_NF > 1.:
McGreer_NF = 1.
if McGreer_NF < 0.:
McGreer_NF = 0.
# As the likelihood is computed in log space, the addition of the prior is added linearly to the existing chi^2 likelihood
if McGreer_NF <= 0.06:
total_sum = total_sum + 0.0 # Add zero, as we assume flat (unity) probability at x_HI <= 0.06 (as it is a lower limit)
else:
total_sum = total_sum + np.square( ( McGreer_Mean - McGreer_NF )/(McGreer_OneSigma) )
if self.PriorLegend['GreigPrior'] is True:
# Interpolate the QSO damping wing PDF
spline_QSODampingPDF = interpolate.splrep(self.NFValsQSO,self.PDFValsQSO,s=0)
if QSO_Redshift in z_Hist:
for i in range(len(z_Hist)):
if z_Hist[i] == QSO_Redshift:
NF_QSO = xH_Hist[i]
# Ensure that the neutral fraction does not exceed unity, or go negative
if NF_QSO > 1.0:
NF_QSO = 1.0
if NF_QSO < 0.0:
NF_QSO = 0.0
QSO_Prob = interpolate.splev(NF_QSO,spline_QSODampingPDF,der=0)
# Interpolating the PDF from the QSO damping wing might cause small negative values at the edges (i.e. x_HI ~ 0 or ~1)
# In case it is zero, or negative, set it to a very small non zero number (we take the log of this value, it cannot be zero)
if QSO_Prob <= 0.0:
QSO_Prob = 0.000006
# We work with the log-likelihood, therefore convert the IGM Damping wing PDF to log space
QSO_Prob = -2.*np.log(QSO_Prob)
total_sum = total_sum + QSO_Prob
elif number_redshifts > 2:
order = 1
# Check the redshift range input by the user to determine whether to interpolate or extrapolate the IGM neutral fraction to the QSO redshift
if QSO_Redshift < np.amin(self.Redshift):
# The QSO redshift is outside the range set by the user. Need to extrapolate the reionisation history to obtain the neutral fraction at the QSO redshift
# The linear interpolation/extrapolation function, taking as input the redshifts supplied by the user and the corresponding neutral fractions
# recovered for the specific EoR parameter set
LinearInterpolationFunction = InterpolatedUnivariateSpline(self.Redshift, nf_vals, k=order)
NF_QSO = LinearInterpolationFunction(QSO_Redshift)
else:
# The QSO redshift is within the range set by the user. Can interpolate the reionisation history to obtain the neutral fraction at the QSO redshift
spline_reionisationhistory = interpolate.splrep(self.Redshift,nf_vals,s=0)
NF_QSO = interpolate.splev(QSO_Redshift,spline_reionisationhistory,der=0)
# Ensure that the neutral fraction does not exceed unity, or go negative
if NF_QSO > 1.0:
NF_QSO = 1.0
if NF_QSO < 0.0:
NF_QSO = 0.0
QSO_Prob = interpolate.splev(NF_QSO,spline_QSODampingPDF,der=0)
# Interpolating the PDF from the QSO damping wing might cause small negative values at the edges (i.e. x_HI ~ 0 or ~1)
# In case it is zero, or negative, set it to a very small non zero number (we take the log of this value, it cannot be zero)
if QSO_Prob <= 0.0:
QSO_Prob = 0.000006
# We work with the log-likelihood, therefore convert the IGM Damping wing PDF to log space
QSO_Prob = -2.*np.log(QSO_Prob)
total_sum = total_sum + QSO_Prob
if self.IncludeLightCone is True:
if self.FlagOptions['KEEP_GLOBAL_DATA'] is True:
LightconePSFilename = 'delTps_lightcone_filenames_%s.txt'%(StringArgument_other)
filename = open('%s'%(LightconePSFilename), 'r')
LightconePS = [line.rstrip('\n') for line in filename]
if self.FlagOptions['KEEP_ALL_DATA'] is True:
command = "mv %s %s/StatisticalData/"%(LightconePSFilename,self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
else:
command = "rm %s"%(LightconePSFilename)
os.system(command)
# Removal of the individual light cone files is done here as in principle these can exceed the number of mock observations provided
for i in range(len(LightconePS)):
command = "rm %s"%(LightconePS[i])
os.system(command)
if self.FlagOptions['KEEP_ALL_DATA'] is True:
for j in range(len(self.Redshifts_For_LF)):
command = "mv LF_estimate_%s_%s.txt %s/LFData/"%(StringArgument_other,self.Redshifts_For_LF[j],self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
os.system(command)
else:
for j in range(len(self.Redshifts_For_LF)):
command = "rm LF_estimate_%s_%s.txt"%(StringArgument_other,self.Redshifts_For_LF[j])
os.system(command)
else:
if not self.IncludeLF is 2:
command = "rm delTps_estimate_%s_*"%(StringArgument_other)
os.system(command)
command = "rm NeutralFraction_%s_*"%(StringArgument_other)
os.system(command)
if self.FlagOptions['KEEP_ALL_DATA'] is True:
for j in range(len(self.Redshifts_For_LF)):
command = "mv LF_estimate_%s_%s.txt %s/LFData/"%(StringArgument_other,self.Redshifts_For_LF[j],self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
os.system(command)
else:
for j in range(len(self.Redshifts_For_LF)):
command = "rm LF_estimate_%s_%s.txt"%(StringArgument_other,self.Redshifts_For_LF[j])
os.system(command)
if OutputGlobalAve == 1:
if self.FlagOptions['KEEP_ALL_DATA'] is True:
command = "mv AveData_%s.txt %s/AveData/"%(StringArgument_other,self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
else:
command = "rm AveData_%s.txt"%(StringArgument_other)
os.system(command)
if self.FlagOptions['KEEP_ALL_DATA'] is True:
command = "mv Walker_%s.txt %s/WalkerData"%(StringArgument_other,self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
os.system(command)
command = "mv WalkerCosmology_%s.txt %s/WalkerData"%(StringArgument_other,self.FlagOptions['KEEP_ALL_DATA_FILENAME'])
os.system(command)
else:
command = "rm Walker_%s.txt"%(StringArgument_other)
os.system(command)
command = "rm WalkerCosmology_%s.txt"%(StringArgument_other)
os.system(command)
if(np.isinf(total_sum)):
total_sum = 10000000000.
return -0.5*total_sum,nf_vals
def computeLikelihood(self, ctx):
    """Sampler entry point: delegate the likelihood evaluation to Likelihood().

    ctx: context object handed in by the sampling framework, carrying the
         walker's parameter set; it is passed through unchanged.
    Returns whatever self.Likelihood(ctx) returns — from the method body
    visible above, that is the tuple (-0.5 * chi^2 total, nf_vals).
    """
    return self.Likelihood(ctx)
def setup(self):
print "Likelihood Fitting for 21cm Fast"
| BradGreig/21CMMC | 21CMMC_SourceCode/Programs/CosmoHammer_21CMMC/likelihood/module/Likelihood21cmFast.py | Python | gpl-2.0 | 45,441 | [
"Gaussian"
] | 46bea627ed1d74810354240715ebfd7a62937b35429198a8e49306e958c0e715 |
#!/usr/bin/env python
""" Create a DIRAC MoveReplica request to be executed by the RMS
"""
__RCSID__ = "$Id $"
import os
from hashlib import md5
import time
from DIRAC.Core.Base import Script
# Register the --help/usage text with the DIRAC Script machinery: the first two
# lines of the module docstring, followed by the positional-argument synopsis.
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[0],
                                     __doc__.split( '\n' )[1],
                                     'Usage:',
                                     ' %s [option|cfgfile] ... sourceSE LFN targetSE1 [targetSE2 ...]' % Script.scriptName,
                                     'Arguments:',
                                     ' sourceSE: source SE',
                                     ' targetSE: target SE',
                                     ' LFN: LFN or file containing a List of LFNs' ] ) )
def getLFNList( arg ):
  """ Return a de-duplicated list of LFNs.

  :param str arg: either a single LFN, or the path of an existing plain-text
                  file with one LFN per line (the first whitespace-separated
                  token of each line is taken)
  :returns: list of unique LFNs; ordering is not preserved (set-based dedup,
            same as the original behaviour)
  """
  if os.path.exists( arg ):
    # 'arg' names a file: read it inside a context manager so the handle is
    # closed deterministically (the original leaked it to the GC), and skip
    # blank lines, on which the original `line.split()[0]` raised IndexError.
    with open( arg ) as lfnFile:
      lfnList = [ line.split()[0] for line in lfnFile.read().splitlines() if line.strip() ]
  else:
    lfnList = [ arg ]
  return list( set( lfnList ) )
# # execution
if __name__ == "__main__":
  from DIRAC.Core.Base.Script import parseCommandLine
  # Parse the DIRAC command line first; this also initialises the local
  # configuration before any client is instantiated.
  parseCommandLine()
  import DIRAC
  from DIRAC import gLogger
  args = Script.getPositionalArgs()
  if len( args ) < 3:
    # Fewer than sourceSE + LFN + one targetSE: show usage (showHelp
    # presumably terminates the script -- standard DIRAC behaviour, verify).
    Script.showHelp()
  sourceSE = args[0]
  lfnList = getLFNList( args[1] )
  # Each remaining argument may itself be a comma-separated list of SEs;
  # flatten them all and de-duplicate.
  targetSEs = list( set( [ se for targetSE in args[2:] for se in targetSE.split( ',' ) ] ) )
  gLogger.info( "Will create request with 'MoveReplica' "\
                "operation using %s lfns and %s target SEs" % ( len( lfnList ), len( targetSEs ) ) )
  from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
  from DIRAC.RequestManagementSystem.Client.Request import Request
  from DIRAC.RequestManagementSystem.Client.Operation import Operation
  from DIRAC.RequestManagementSystem.Client.File import File
  from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
  from DIRAC.Core.Utilities.List import breakListIntoChunks
  # An RMS operation holds a bounded number of files, so submit one request
  # per chunk of 100 LFNs.
  lfnChunks = breakListIntoChunks( lfnList, 100 )
  multiRequests = len( lfnChunks ) > 1
  error = 0
  count = 0
  reqClient = ReqClient()
  fc = FileCatalog()
  for lfnChunk in lfnChunks:
    # Resolve catalogue metadata (size, checksum) for the whole chunk at once.
    metaDatas = fc.getFileMetadata( lfnChunk )
    if not metaDatas["OK"]:
      gLogger.error( "unable to read metadata for lfns: %s" % metaDatas["Message"] )
      error = -1
      continue
    metaDatas = metaDatas["Value"]
    for failedLFN, reason in metaDatas["Failed"].items():
      gLogger.error( "skipping %s: %s" % ( failedLFN, reason ) )
    # Keep only LFNs whose metadata could be resolved.
    lfnChunk = set( metaDatas["Successful"] )
    if not lfnChunk:
      gLogger.error( "LFN list is empty!!!" )
      error = -1
      continue
    if len( lfnChunk ) > Operation.MAX_FILES:
      gLogger.error( "too many LFNs, max number of files per operation is %s" % Operation.MAX_FILES )
      error = -1
      continue
    count += 1
    request = Request()
    # NOTE(review): both halves of the name are md5(repr(time.time())) taken
    # back-to-back; a genuinely distinct second component was presumably
    # intended -- confirm that uniqueness of request names is sufficient.
    request.RequestName = "%s_%s" % ( md5( repr( time.time() ) ).hexdigest()[:16], md5( repr( time.time() ) ).hexdigest()[:16] )
    moveReplica = Operation()
    moveReplica.Type = 'MoveReplica'
    moveReplica.SourceSE = sourceSE
    moveReplica.TargetSE = ",".join( targetSEs )
    for lfn in lfnChunk:
      metaDict = metaDatas["Successful"][lfn]
      opFile = File()
      opFile.LFN = lfn
      opFile.Size = metaDict["Size"]
      if "Checksum" in metaDict:
        # # should check checksum type, now assuming Adler32 (metaDict["ChecksumType"] = 'AD'
        opFile.Checksum = metaDict["Checksum"]
        opFile.ChecksumType = "ADLER32"
      moveReplica.addFile( opFile )
    request.addOperation( moveReplica )
    # Hand the assembled request over to the Request Management System.
    result = reqClient.putRequest( request )
    if not result["OK"]:
      gLogger.error( "Failed to submit Request: %s" % ( result["Message"] ) )
      error = -1
      continue
    if not multiRequests:
      gLogger.always( "Request %d submitted successfully" % result['Value'] )
  if multiRequests:
    gLogger.always( "%d requests have been submitted" % ( count ) )
  # Exit with -1 if any chunk failed (metadata lookup or submission), 0 otherwise.
  DIRAC.exit( error )
| andresailer/DIRAC | DataManagementSystem/scripts/dirac-dms-move-replica-request.py | Python | gpl-3.0 | 4,031 | [
"DIRAC"
] | 8be5577d5cb19fa3729c2a13663e809ba16938c04d8d781b08b18ca6d7f8b26b |
#!/usr/bin/env python
import numpy as np
import boldsim.sim as sim
import timeit
import argparse
# Command-line interface: every benchmark family gets an opt-in flag;
# nothing runs unless at least one flag is supplied.
parser = argparse.ArgumentParser(description='Run BOLDsim benchmarks.',
                                 epilog='By default no benchmarks will be run, so make sure to specify one!')
parser.add_argument('-a', '--all', action='store_true', help='Run the full suite of benchmarks')
parser.add_argument('-v','--sim-vol', action='store_true', help='Run the simVOL benchmarks')
parser.add_argument('-t','--sim-ts', action='store_true', help='Run the simTS benchmarks')
parser.add_argument('-n', '--noise', action='store_true', help='Run the full suite of noise benchmarks')
parser.add_argument('--stimfunc', action='store_true', help='Run the stimfunction benchmarks')
parser.add_argument('--specdesign', action='store_true', help='Run the specifydesign benchmarks')
parser.add_argument('--specregion', action='store_true', help='Run the specifyregion benchmarks')
parser.add_argument('--systemnoise', action='store_true', help='Run the systemnoise benchmarks')
parser.add_argument('--lowfreq', action='store_true', help='Run the lowfreqnoise benchmarks')
parser.add_argument('--phys', action='store_true', help='Run the physnoise benchmarks')
parser.add_argument('--task-related', action='store_true', help='Run the task-related noise benchmarks')
parser.add_argument('--temporal', action='store_true', help='Run the temporally correlated noise benchmarks')
parser.add_argument('--spatial', action='store_true', help='Run the spatially correlated noise benchmarks')
def run_test(title, test, setup, repeats=10):
    """Time the statement string `test` with timeit and print min/mean/max/reps.

    `setup` is executed once per repeat; each repeat runs `test` exactly once.
    """
    # Python 2 print statement with trailing comma: keep the timings on the same line.
    print '{:>60}'.format(title+':'),
    x = timeit.Timer(test, setup=setup).repeat(repeats,1)
    print '{:>9.3f} {:>9.3f} {:>9.3f} {:9d}'.format(min(x), np.mean(x), max(x), repeats)
def print_header(title):
    """Print a benchmark-section title followed by the min/mean/max/reps column headings."""
    print '\n{:<60}'.format(title)
    print '{:<61}{:>9} {:>9} {:>9} {:>9}'.format('', 'min','mean','max','reps')
# Common timeit setup: every benchmark needs numpy and the simulator module.
# The string body stays unindented because timeit executes it at module level.
setup = """
import numpy as np
from boldsim import sim
"""
# Repeat counts, graded by how expensive each benchmark is to run.
really_slow = 1
slow = 3
normal = 10
#################
def benchmark_stimfunction():
    """Time sim.stimfunction for a few onset-list sizes and scan lengths."""
    print_header('Stimfunction benchmarks')
    run_test('Few events, 200 TRs',
             "s = sim.stimfunction(400, [2, 12], 2, .1)",
             setup=setup)
    run_test('More events, 200 TRs',
             "s = sim.stimfunction(400, [1,2,3,4,5,6,7,8,9,10,11,12,13,14], 2, .1)",
             setup=setup)
    run_test('More events, 2000 TRs',
             "s = sim.stimfunction(4000, onsets, 2, .1)",
             setup=setup + """
onsets = np.arange(1995)
""")
##################
def benchmark_specifydesign():
    """Time sim.specifydesign across design sizes and HRF convolution choices."""
    print_header('Specifydesign benchmarks')

    # timeit setup snippets; string bodies stay unindented because timeit
    # executes them at module level.
    setup_short_few = setup + """
total_time = 400
onsets = [[0, 30, 60], [25, 75]]
dur = [[10],[3]]
effect_sizes = [3, 10]
acc = .5
"""

    setup_long_few = setup + """
total_time = 4000
onsets = [[0, 30, 60], [550, 650]]
dur = [[10],[3]]
effect_sizes = [3, 10]
acc = .5
"""

    setup_long_many = setup + """
total_time = 4000
onsets = [np.arange(1,979,20), np.arange(2,979,19)]
dur = [[10],[3]]
effect_sizes = [3, 10]
acc = .5
"""

    setup_long_many_conds = setup + """
total_time = 4000
onsets = [np.arange(1,979,20), np.arange(2,979,19),
np.arange(3,979,18), np.arange(4,979,17),
np.arange(5,979,16), np.arange(6,979,15),
np.arange(7,979,14), np.arange(8,979,13),
np.arange(9,979,12), np.arange(10,979,11),
]
dur = [[10],[9],[8],[7],[6],[5],[4],[3],[2],[1]]
effect_sizes = [3, 10, 2, 6, 7, 1, 5, 4, 9, 8]
acc = .5
"""

    titles = ['2 conds, 200 TRs, few events, {}',
              '2 conds, 2000 TRs, few events, {}',
              '2 conds, 2000 TRs, many events, {}',
              '10 conds, 2000 TRs, many events, {}',
              ]
    setups = [setup_short_few, setup_long_few, setup_long_many, setup_long_many_conds]

    # Every design size is timed once per convolution kernel.
    for hrf in ['none','gamma','double-gamma']:
        for title, specific_setup in zip(titles, setups):
            run_test(title.format(hrf),
                     """d = sim.specifydesign(total_time, onsets=onsets, durations=dur, accuracy=acc,
effect_sizes=effect_sizes, TR=2, conv='{}')""".format(hrf),
                     setup=specific_setup, repeats=normal)
def benchmark_specifyregion():
    """Time sim.specifyregion for each region shape, image size and fading level."""
    print_header('Specifyregion benchmarks')
    for shape in ['cube','sphere','manual']:
        run_test('Toy space [10x10], 2 regions, {}'.format(shape),
                 """s = sim.specifyregion(dim=[10,10],coord=[[1,1],[7,7]],radius=[1,2],
form='{}', fading=0)""".format(shape),
                 setup=setup, repeats=normal)
        run_test('Highres Slice [256x256], 2 regions, {}'.format(shape),
                 """s = sim.specifyregion(dim=[256,256],coord=[[1,1],[240,240]],radius=[13,22],
form='{}', fading=0)""".format(shape),
                 setup=setup, repeats=normal)
        # 50 random regions on a high-resolution slice, per fading level.
        for fade in [0, .5, 1]:
            run_test('Highres slice [256x256], 50 regions, fade {}, {}'.format(fade, shape),
                     """s = sim.specifyregion(dim=[256,256],coord=coords,radius=radii,
form='{}', fading={})""".format(shape,fade),
                     repeats=slow, setup=setup + """
coords = [[np.random.randint(low=0, high=255),
np.random.randint(low=0, high=255)] for x in range(50)]
radii = [np.random.randint(low=1, high=20) for x in range(50)]
""")
        # Same, but on a whole-brain 3D volume.
        for fade in [0, .5, 1]:
            run_test('Wholebrain [64x64x32], 50 regions, fade {}, {}'.format(fade, shape),
                     """s = sim.specifyregion(dim=[64,64,32],coord=coords,radius=radii,
form='{}', fading={})""".format(shape, fade),
                     repeats=really_slow, setup=setup + """
coords = [[np.random.randint(low=0, high=63),
np.random.randint(low=0, high=63),
np.random.randint(low=0, high=31)] for x in range(50)]
radii = [np.random.randint(low=1, high=20) for x in range(50)]""")
def benchmark_systemnoise():
    """Time sim.system_noise for both noise distributions at several sizes."""
    print_header('System noise benchmarks')
    for noise_type in ['gaussian','rayleigh']:
        run_test('Single voxel, 200 TRs, {}'.format(noise_type),
                 """s = sim.system_noise(nscan=200, sigma=1.5,
dim=(1,), noise_dist='{}')""".format(noise_type),
                 setup=setup, repeats=normal)
        run_test('Single voxel, 200000 TRs, {}'.format(noise_type),
                 """s = sim.system_noise(nscan=200000, sigma=1.5,
dim=(1,), noise_dist='{}')""".format(noise_type),
                 setup=setup, repeats=normal)
        run_test('Wholebrain voxels [64x64x32], 200 TRs, {}'.format(noise_type),
                 """s = sim.system_noise(nscan=200, sigma=1.5,
dim=(64,64,32), noise_dist='{}')""".format(noise_type),
                 setup=setup, repeats=slow)
def benchmark_lowfreqnoise():
    """Time sim.lowfreqdrift from a single voxel up to a whole brain."""
    print_header('Low frequency noise benchmarks')
    run_test('Single voxel, 200 TRs',
             'noise = sim.lowfreqdrift(nscan=200, freq=128.0, dim=(1,))',
             setup=setup, repeats=normal)
    run_test('Single voxel, 2000 TRs',
             'noise = sim.lowfreqdrift(nscan=2000, freq=128.0, dim=(1,))',
             setup=setup, repeats=normal)
    run_test('Whole slice [64x64], 200 TRs',
             'noise = sim.lowfreqdrift(nscan=200, freq=128.0, dim=(64,64))',
             setup=setup, repeats=really_slow)
    run_test('Wholebrain [64x64x32], 200 TRs',
             'noise = sim.lowfreqdrift(nscan=200, freq=128.0, dim=(64,64,32))',
             setup=setup, repeats=really_slow)
def benchmark_physnoise():
    """Time sim.physnoise (cardiac/respiratory noise) at several sizes."""
    print_header('Physiological noise benchmarks')
    run_test('Single voxel, 200 TRs',
             'noise = sim.physnoise(nscan=200, dim=(1,))',
             setup=setup)
    run_test('Single voxel, 2000 TRs',
             'noise = sim.physnoise(nscan=2000, dim=(1,))',
             setup=setup)
    run_test('Wholebrain [64x64x32], 200 TRs',
             'noise = sim.physnoise(nscan=200, dim=(64,64,32))',
             setup=setup)
def benchmark_tasknoise():
    """Time sim.tasknoise; each case builds its design `d` in the timeit setup."""
    print_header('Task-related noise benchmarks')
    run_test('Single voxel, 200 TRs',
             'noise = sim.tasknoise(design=d, dim=(1,))',
             repeats=normal, setup=setup+"""
total_time = 400
onsets = [[0, 30, 60], [25, 75]]
dur = [[1],[3]]
effect_sizes = [1, 2]
acc = .5
d = sim.specifydesign(total_time, onsets=onsets, durations=dur, accuracy=acc,
effect_sizes=effect_sizes, conv='double-gamma', TR=2)
""")
    run_test('Single voxel, 2000 TRs',
             'noise = sim.tasknoise(design=d, dim=(1,))',
             repeats=normal, setup=setup+"""
total_time = 4000
onsets = [[0, 30, 60], [550, 650]]
dur = [[10],[3]]
effect_sizes = [3, 10]
acc = .5
d = sim.specifydesign(total_time, onsets=onsets, durations=dur, accuracy=acc,
effect_sizes=effect_sizes, conv='double-gamma', TR=2)
""")
    run_test('Wholebrain [64x64x32], 200 TRs',
             'noise = sim.tasknoise(design=d, dim=(64,64,32))',
             repeats=slow, setup=setup+"""
total_time = 400
onsets = [np.arange(1,370,20), np.arange(2,379,19),
np.arange(3,370,18), np.arange(4,379,17),
np.arange(5,379,16), np.arange(6,379,15),
np.arange(7,379,14), np.arange(8,379,13),
np.arange(9,379,12), np.arange(10,379,11),
]
dur = [[10],[9],[8],[7],[6],[5],[4],[3],[2],[1]]
effect_sizes = [3, 10, 2, 6, 7, 1, 5, 4, 9, 8]
acc = .5
d = sim.specifydesign(total_time, onsets=onsets, durations=dur, accuracy=acc,
effect_sizes=effect_sizes, conv='double-gamma', TR=2)
""")
def benchmark_temporalnoise():
    """Time sim.temporalnoise for AR(1) and AR(3) models at several sizes."""
    print_header('Temporally correlated noise benchmarks')
    run_test('Single Voxel, 200 TRs, AR(1)',
             'noise = sim.temporalnoise(dim=(1,),nscan=200, ar_coef=[.2])',
             setup=setup, repeats=normal)
    run_test('Single Voxel, 200 TRs, AR(3)',
             'noise = sim.temporalnoise(dim=(1,),nscan=200, ar_coef=[.3, .2, .4])',
             setup=setup, repeats=normal)
    run_test('Single Voxel, 2000 TRs, AR(1)',
             'noise = sim.temporalnoise(dim=(1,),nscan=2000, ar_coef=[.2])',
             setup=setup, repeats=normal)
    run_test('Single Voxel, 2000 TRs, AR(3)',
             'noise = sim.temporalnoise(dim=(1,),nscan=2000, ar_coef=[.3, .2, .4])',
             setup=setup, repeats=normal)
    run_test('Whole slice [64x64], 200 TRs, AR(1)',
             'noise = sim.temporalnoise(dim=(64,64),nscan=200, ar_coef=[.2])',
             setup=setup, repeats=slow)
    run_test('Whole slice [64x64x32], 200 TRs, AR(3)',
             'noise = sim.temporalnoise(dim=(64,64),nscan=200, ar_coef=[.3, .2, .4])',
             setup=setup, repeats=slow)
    run_test('Wholebrain [64x64x32], 200 TRs, AR(1)',
             'noise = sim.temporalnoise(dim=(64,64,32),nscan=200, ar_coef=[.2])',
             setup=setup, repeats=really_slow)
    run_test('Wholebrain [64x64x32], 200 TRs, AR(3)',
             'noise = sim.temporalnoise(dim=(64,64,32),nscan=200, ar_coef=[.3, .2, .4])',
             setup=setup, repeats=really_slow)
def benchmark_spatialnoise():
    """Time sim.spatialnoise for 2D/3D volumes, single scan and full series."""
    print_header('Spatial noise benchmarks')
    run_test('Toy dim [10x10], 1 TR',
             'noise = sim.spatialnoise(dim=(10,10),nscan=1,rho=.7)',
             setup=setup, repeats=normal)
    run_test('Whole slice [64x64], 1 TR',
             'noise = sim.spatialnoise(dim=(64,64),nscan=1,rho=.7)',
             setup=setup, repeats=normal)
    run_test('Hires slice [256x256], 1 TR',
             'noise = sim.spatialnoise(dim=(256,256),nscan=1,rho=.7)',
             setup=setup, repeats=normal)
    run_test('Toy dim [10x10x10], 1 TR',
             'noise = sim.spatialnoise(dim=(10,10,10),nscan=1,rho=.7)',
             setup=setup, repeats=normal)
    run_test('Hires [256x256x128], 1 TR',
             'noise = sim.spatialnoise(dim=(256,256,128),nscan=1,rho=.7)',
             setup=setup, repeats=slow)
    run_test('Toy dim [10x10], 200 TRs',
             'noise = sim.spatialnoise(dim=(10,10),nscan=200,rho=.7)',
             setup=setup, repeats=normal)
    run_test('Whole slice [64x64], 200 TRs',
             'noise = sim.spatialnoise(dim=(64,64),nscan=200,rho=.7)',
             setup=setup, repeats=slow)
    run_test('Wholebrain [64x64x32], 200 TRs',
             'noise = sim.spatialnoise(dim=(64,64,32),nscan=200,rho=.7)',
             setup=setup, repeats=really_slow)
def benchmark_simTS():
    """Time end-to-end time-series simulation (simTSfmri) per noise model."""
    print_header('Sim TS benchmarks')
    for noise_type in ['none','white','temporal','low-freq','phys','task-related','mixture']:
        run_test('Single Voxel, 200 TRs, {}'.format(noise_type),
                 """
design = sim.simprepTemporal(total_time=400,onsets=[[1,41, 81, 121, 161],
[15, 55, 95, 135, 175]],
durations=[[20],[7]],
effect_sizes=[1], conv='double-gamma')
ts = sim.simTSfmri(design, noise='{}')""".format(noise_type),
                 setup=setup, repeats=normal)
    for noise_type in ['none','white','temporal','low-freq','phys','task-related','mixture']:
        run_test('Single Voxel, 2000 TRs, {}'.format(noise_type),
                 """
design = sim.simprepTemporal(total_time=4000,onsets=[[1,41, 81, 121, 161],
[15, 55, 95, 135, 175]],
durations=[[20],[7]],
effect_sizes=[1], conv='double-gamma')
ts = sim.simTSfmri(design, noise='{}')""".format(noise_type),
                 setup=setup, repeats=normal)
def benchmark_simVOL():
    """Time end-to-end volume simulation (simVOLfmri) per noise model and image size."""
    print_header('Sim VOL benchmarks')
    for noise_type in ['none','white','temporal','low-freq','phys','task-related','mixture']:
        run_test('Toy Dim [10x10], 200 TRs, {}'.format(noise_type),
                 """
design = sim.simprepTemporal(total_time=400,onsets=[[1,41, 81, 121, 161],
[15, 55, 95, 135, 175]],
durations=[[20],[7]], TR=2,
effect_sizes=[1], conv='double-gamma')
image = sim.simprepSpatial(regions=3,
coord=[[1,1],[5,5],[6,0]],
radius=[1,2,0],
form=['cube','sphere','cube'],
fading=[.5, 0, 0])
sim_ds = sim.simVOLfmri(designs=design,
images=image,
noise='{}',
base=10,
dim=[10,10],
SNR=2)
""".format(noise_type),
                 setup=setup, repeats=normal)
    for noise_type in ['none','white','temporal','low-freq','phys','task-related','mixture']:
        run_test('Whole slice [64x64], 200 TRs, {}'.format(noise_type),
                 """
design = sim.simprepTemporal(total_time=400,onsets=[[1,41, 81, 121, 161],
[15, 55, 95, 135, 175]],
durations=[[20],[7]], TR=2,
effect_sizes=[1], conv='double-gamma')
image = sim.simprepSpatial(regions=3,
coord=[[1,1],[5,5],[6,0]],
radius=[1,2,0],
form=['cube','sphere','cube'],
fading=[.5, 0, 0])
sim_ds = sim.simVOLfmri(designs=design,
images=image,
noise='{}',
base=10,
dim=[64,64],
SNR=2)
""".format(noise_type),
                 setup=setup, repeats=slow)
    # Whole-brain case additionally exercises 'spatial' noise.
    for noise_type in ['none','white','temporal','low-freq','phys','task-related','spatial','mixture']:
        run_test('Wholebrain [64x64x32], 200 TRs, {}'.format(noise_type),
                 """
design = sim.simprepTemporal(total_time=400,onsets=[[1,41, 81, 121, 161],
[15, 55, 95, 135, 175]],
durations=[[20],[7]], TR=2,
effect_sizes=[1], conv='double-gamma')
image = sim.simprepSpatial(regions=3,
coord=[[1,1,1],[5,5,5],[6,0,0]],
radius=[1,2,0],
form=['cube','sphere','cube'],
fading=[.5, 0, 0])
sim_ds = sim.simVOLfmri(designs=design,
images=image,
noise='{}',
base=10,
dim=[64,64,32],
SNR=2)
""".format(noise_type),
                 setup=setup, repeats=really_slow)
def run_benchmarks(args):
    """Run every benchmark whose command-line flag is set in `args`.

    `args` is the vars() dict of the parsed argparse namespace; keys use
    underscores (e.g. 'task_related' for --task-related).
    """
    # (flag name, benchmark callable) pairs, in the order they should run.
    dispatch = [
        ('stimfunc', benchmark_stimfunction),
        ('specdesign', benchmark_specifydesign),
        ('specregion', benchmark_specifyregion),
        ('systemnoise', benchmark_systemnoise),
        ('lowfreq', benchmark_lowfreqnoise),
        ('phys', benchmark_physnoise),
        ('task_related', benchmark_tasknoise),
        ('temporal', benchmark_temporalnoise),
        ('spatial', benchmark_spatialnoise),
        ('sim_ts', benchmark_simTS),
        ('sim_vol', benchmark_simVOL),
    ]
    for flag, bench in dispatch:
        if args[flag]:
            bench()
if __name__ == "__main__":
    # Work with a plain dict so the group flags below can toggle other flags.
    args = parser.parse_args()
    args = vars(args)
    if args['all']:
        # --all turns every flag on (harmlessly including 'all'/'noise' themselves).
        for key in args.keys():
            args[key] = True
    if args['noise']:
        # --noise is shorthand for the individual noise benchmarks.
        for key in ['systemnoise','lowfreq','phys','task_related',
                    'temporal','spatial']:
            args[key] = True
    run_benchmarks(args)
| adamatus/boldsim | benchmark/benchmark.py | Python | gpl-2.0 | 17,981 | [
"Gaussian"
] | 24ff728304c9ebe14e84cbfd6bae61f7fdeab9fbda56355c4bf2c53c519b9622 |
from rdkit.ML import FeatureSelect as FS
from rdkit import DataStructs as DS
from rdkit import RDConfig
import unittest
class TestCase(unittest.TestCase):
  """Exercise FeatureSelect.selectCMIM on both bit-vector flavours.

  The two public tests build identical example sets using ExplicitBitVect
  and SparseBitVect respectively; bits 2 and 4 are the informative features
  that the CMIM selection should recover.
  """

  def setUp(self):
    pass

  def _buildExamples(self, vectClass):
    """Return the shared 5-example data set using `vectClass` bit vectors.

    Each example is [id, bitVect, activity].
    """
    examples = []
    for bits, act in (([0, 2, 4], 0),
                      ([0, 2, 4], 0),
                      ([0, 3, 4], 1),
                      ([0, 2, 4], 0),
                      ([0, 2], 1)):
      bv = vectClass(5)
      bv.SetBitsFromList(bits)
      examples.append([0, bv, act])
    return examples

  def _checkSelection(self, examples):
    """Verify selectCMIM picks bits 2 then 4, padding with -1 when asked for more."""
    # assertEqual replaces the deprecated failUnlessEqual alias
    # (removed in Python 3.12).
    self.assertEqual(FS.selectCMIM(examples, 2), (2, 4))
    self.assertEqual(FS.selectCMIM(examples, 1), (2,))
    # Only two informative bits exist, so a third selection comes back as -1.
    self.assertEqual(FS.selectCMIM(examples, 3), (2, 4, -1))

  def test0FromList(self):
    self._checkSelection(self._buildExamples(DS.ExplicitBitVect))

  def test1FromList(self):
    self._checkSelection(self._buildExamples(DS.SparseBitVect))


if __name__ == '__main__':
  unittest.main()
| rdkit/rdkit-orig | Code/ML/FeatureSelect/Wrap/testCMIM.py | Python | bsd-3-clause | 1,730 | [
"RDKit"
] | b0c26b30ca9d8f5677e0ee98cb56eeed00af74d34ce7234f11b954553f1a217f |
import numpy as np
import catmap
import re
from copy import copy
from ase.atoms import string2symbols
import warnings
def get_composition(species_string):
    """
    Convert string of species into a dictionary of species and the number of each species.

    :param species_string: A string of the reaction species. Should be a chemical
        formula string that may also contain '-','&',or,'pe'. 'pe' is a special case
        corresponding to a proton-electron pair and has the compositon of H, while
        ele corresponds to an electron and has no associated atoms.
    :type species_string: str

    Returns None when the cleaned string is not a parseable chemical formula.
    """
    # Strip transition-state / electrochemistry markup before parsing the
    # formula (order matters: 'pe' -> 'H' must run before 'ele' is removed).
    for marker, stand_in in (('-', ''), ('pe', 'H'), ('&', ''), ('ele', '')):
        species_string = species_string.replace(marker, stand_in)
    try:
        symbols = string2symbols(species_string)
    except ValueError:
        return None
    return dict((atom, symbols.count(atom)) for atom in set(symbols))
def cartesian_product(*args, **kwds):
    """
    Yield tuples from the Cartesian product of the input iterables
    (equivalent to itertools.product).

    cartesian_product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
    cartesian_product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111

    :param args: input iterables.
    :param kwds: only 'repeat' (int, default 1) is recognised; the pool list
        is tiled that many times, as in itertools.product.
    """
    # A list comprehension instead of bare map(): on Python 3 map() returns
    # an iterator, which cannot be multiplied, so the old code crashed there.
    pools = [tuple(pool) for pool in args] * kwds.get('repeat', 1)
    result = [[]]
    for pool in pools:
        result = [combo + [item] for combo in result for item in pool]
    for combo in result:
        yield tuple(combo)
def convert_formation_energies(energy_dict, atomic_references, composition_dict):
    """
    Convert raw energies into formation energies relative to atomic references.

    :param energy_dict: Dictionary of energies for all species. Keys are species
        names and values are energies.
    :type energy_dict: dict
    :param atomic_references: Dictionary mapping each atomic symbol to the name
        of the reference species used to define its chemical potential.
    :type atomic_references: dict
    :param composition_dict: Dictionary mapping each species name to its
        composition dict (atomic symbol -> count).
    :type composition_dict: dict
    :returns: (new_data, ref_offsets) where new_data maps species to formation
        energies (rounded to 5 decimals) and ref_offsets maps atoms to their
        per-atom reference energies.
    :raises ValueError: if the reference set is singular/invalid.
    """
    atoms = sorted(atomic_references)
    n = len(atoms)
    # Stoichiometry matrix of the references: R[row, col] is the number of
    # atoms `atoms[col]` in the reference species chosen for `atoms[row]`.
    R = np.zeros((n, n))
    ref_energies = []
    for row, atom in enumerate(atoms):
        ref_species = atomic_references[atom]
        ref_energies.append(energy_dict[ref_species])
        ref_comp = composition_dict[ref_species]
        for col, other_atom in enumerate(atoms):
            R[row, col] = ref_comp.get(other_atom, 0)
    # Every reference must actually contain the atom it is referencing.
    if not np.prod([R[i, i] for i in range(0, n)]):
        raise ValueError('Reference set is not valid.')
    ref_energies = np.array(ref_energies)
    try:
        R_inv = np.linalg.solve(R, np.eye(n))
    except np.linalg.linalg.LinAlgError:
        raise ValueError('Reference set is not valid.')
    # Per-atom chemical potentials implied by the reference energies.
    ref_offsets = dict(zip(atoms, np.dot(R_inv, ref_energies)))
    new_data = {}
    for species in energy_dict:
        formation = energy_dict[species]
        for atom, count in composition_dict[species].items():
            formation -= ref_offsets[atom] * count
        new_data[species] = round(formation, 5)
    return new_data, ref_offsets
def parse_constraint(minmaxlist, name):
    """
    Parse a list of constraint specifiers into (mins, maxs) lists.

    Each entry may be:
      * None      -> unbounded (-1e99, 1e99)
      * 'lo:hi'   -> explicit numeric bounds
      * '+'       -> non-negative (0, 1e99)
      * '-'       -> non-positive (-1e99, 0)
      * a number  -> fixed value (min == max)

    :param minmaxlist: List of minimum and maximum constraints.
    :type minmaxlist: list
    :param name: Name used in the error message if parsing fails.
    :type name: str
    :raises ValueError: if any entry cannot be interpreted.
    """
    minlist = []
    maxlist = []
    for spec in minmaxlist:
        try:
            if spec is None:
                lo, hi = -1e99, 1e99
            elif str(spec).count(':') == 1:
                lo, hi = (float(bound) for bound in spec.split(':'))
            elif spec == '+':
                lo, hi = 0, 1e99
            elif spec == '-':
                lo, hi = -1e99, 0
            else:
                lo = hi = float(spec)
        except (ValueError, TypeError, AttributeError):
            raise ValueError('Could not parse constraint for ' +
                             name + ': ' + str(minmaxlist))
        minlist.append(lo)
        maxlist.append(hi)
    return minlist, maxlist
def constrained_relaxation(
        A, b, x0, x_min, x_max, max_iter = 100000, tolerance = 1e-10):
    """
    Solve Ax=b (least-squares sense) subject to the box constraints
    x_min_i <= x_i <= x_max_i, by coordinate relaxation (Axelsson 1996).
    Note that x_min/x_max are both lists/arrays of length equal to x.

    :param A: coefficient matrix.
    :type A: numpy.array
    :param b: right-hand-side vector.
    :type b: numpy.array
    :param x0: initial guess for x.
    :type x0: numpy.array
    :param x_min: per-component lower bounds.
    :type x_min: array_like
    :param x_max: per-component upper bounds.
    :type x_max: array_like
    :param max_iter: maximum number of relaxation sweeps.
    :type max_iter: int, optional
    :param tolerance: convergence threshold on the change of the functional.
    :type tolerance: float, optional
    :raises ValueError: if the iteration does not converge within max_iter sweeps.
    """

    def J(x, A, b):
        """
        Quadratic functional minimised by the least-squares solution:
        J(x) = x^T A^T A x - 2 b^T A x (the squared residual up to a constant).
        """
        return np.dot(
            np.dot(np.dot(x.T, A.T), A), x) - 2*np.dot(np.dot(b.T, A), x)

    ai = x_min
    bi = x_max
    N = len(x0)

    def find_min(q):
        """
        Minimise J along coordinate q with all other coordinates fixed,
        then clip the minimiser into [ai[q], bi[q]].
        """
        u[q] = 0
        v = np.dot(A, u)
        Aq = A[:, q]
        # The 1-D unconstrained minimiser. The original code accumulated
        # these identical dot products N times in a loop; the common factor
        # of N cancelled in the ratio, so computing each product once is
        # equivalent and O(N) cheaper per coordinate.
        zeta = (np.dot(b, Aq) - np.dot(v, Aq)) / np.dot(Aq, Aq)
        if zeta > bi[q]:
            zeta = bi[q]
        if zeta < ai[q]:
            zeta = ai[q]
        return zeta

    # Use the module's own `copy` import (shallow copy) rather than reaching
    # through the catmap package for the same function.
    u = copy(x0)
    nIter = 0
    converged = False
    while nIter < max_iter and converged == False:
        nIter += 1
        fOld = J(u, A, b)
        for j in range(0, N):
            u[j] = find_min(j)
        fNew = J(u, A, b)
        fDiff = fOld - fNew
        if np.linalg.norm(fDiff) < tolerance:
            converged = True
    if converged == True:
        return u
    else:
        raise ValueError('Constrained relaxation did not converge.'+
                'Residual was '+str(np.linalg.norm(fDiff)))
def scaling_coefficient_matrix(
        parameter_dict, descriptor_dict, surface_names,
        parameter_names=None, coeff_mins = 0, coeff_maxs = 1e99,
        return_error_dict = False):
    """Determine adsorption and transition-state energies
    as a linear function of descriptors.

    :param parameter_dict: Dictionary where the key is adsorbate name
        and the value is a list of adsorption energies for each surface.
        If some surfaces do not have an adsorption energy use None
        as a placeholder.
    :type parameter_dict: dict

    :param descriptor_dict: Dictionary where the key is surface name and the
        value is a list of descriptor values for each surface.
    :type descriptor_dict: dict

    :param surface_names: List of surfaces which defines the order of
        surface adsorption energies in parameter_dict.
    :type surface_names: list

    :param parameter_names: List of adsorbates which defines the order
        of adsorption coefficients in the output. Default is the
        order of parameter_dict.keys().
    :type parameter_names: list, optional

    :param coeff_mins: Defines the minimum value of the coefficient
        for each descriptor. Should be a matrix/array/list of lists
        which matches the shape of the expected output.
    :type coeff_mins: float, optional

    :param coeff_maxs: Same as coeff_mins but for the maximum value of
        the coefficient.
    :type coeff_maxs: float, optional

    :param return_error_dict: Specify whether or not to return a dictionary
        of the per-surface fit errors.
    :type return_error_dict: bool, optional
    """
    #Define adsorbate order if it isn't
    if not parameter_names: parameter_names = parameter_dict.keys()

    #Check that input dictionaries are valid.
    def check_lengths(dictionary,force_numeric = False):
        """
        Check that the input dictionaries are valid.

        :param dictionary: Input dictionary.
        :type dictionary: dict
        :param force_numeric: Ensure that all values in the dictionary are numeric.
        :type force_numeric: bool, optional
        """
        for val in dictionary.values():
            if len(val) != len(dictionary.values()[0]):
                key_len = '\n'.join([key+':'+str(len(dictionary[key]))
                                     for key in dictionary])
                raise ValueError('All values must be lists of same length.'+
                                 'Use None as placeholder. \nKey:Length\n'+key_len)
            if force_numeric:
                try:
                    [float(num) for num in val]
                except:
                    raise ValueError('All values must be numeric. '+
                                     'Error when parsing ' + str(val))

    check_lengths(parameter_dict,False)
    check_lengths(descriptor_dict,True)

    #initialize coefficient matrix that will be returned.
    # Shape: (n_descriptors + 1 constant term) x n_adsorbates.
    C = np.zeros(
        (len(descriptor_dict.values()[0])+1,len(parameter_dict.keys())))
    #initialize error dictionary that will be returned
    #(if return_error_dict=True)
    error_dict = {}
    for key in parameter_dict:
        error_dict[key] = [None]*len(surface_names)
    #initialize descriptor matrix that will be used if
    #all surfaces are present for a given adsorbate
    Dtotal = np.zeros((len(surface_names),len(descriptor_dict.values()[0])+1))
    for i,Dsurf in enumerate(surface_names):
        Dtotal[i,-1] = 1 #constant term.
        for j,DE in enumerate(descriptor_dict[Dsurf]):
            Dtotal[i,j] = float(DE)

    for Nads,ads in enumerate(parameter_names):
        #construct vectors for relaxation method.
        A = []
        #if mins and maxs are equal then the system is fully constrained and
        #there is no reason to solve for the parameters. However, in order to
        #preserve ordering we use the known coeffs to put in "scaled" parameter
        #energy values and solve for the coefficients which, by definition, will
        #come out to be the same as the constraints.
        if coeff_mins[Nads] == coeff_maxs[Nads]:
            coeffs = coeff_mins[Nads]
            surfs = surface_names
            for surf in surfs:
                descriptors_i = descriptor_dict[surf]
                ads_i = sum([ci*di
                             for ci,di in zip(coeffs,descriptors_i)]) + coeffs[-1]
                A.append(ads_i)
        #construct parameter vector A from parameters
        #if coefficients are not totally constrained
        if not A:
            A = [val for val in parameter_dict[ads] if val is not None]
            surfs = [surface_names[i]
                     for i,val in enumerate(parameter_dict[ads])
                     if val is not None] #determine the surfaces which have
            #parameter energy values for this adsorbate
            try:
                A = np.array([float(val) for val in A])
            except ValueError:
                raise ValueError('Non-numeric value for the '
                                 'parameters of '+ads+'.')
        #construct "descriptor" matrix (note that this is done inside the
        #for loop to allow different parameters to have different number
        #of surfaces)
        if len(surfs) <= len(descriptor_dict.values()[0])+1:
            warnings.warn('Number of energies specified is less than the number'
                          'of free parameters for '+ads+'. Scaling is not reliable'
                          'unless parameters are explicitly specified in '
                          ' constraints_dict.')
        if len(surfs) == len(surface_names):
            D = Dtotal
        else:
            D = np.zeros((len(surfs),len(descriptor_dict.values()[0])+1))
            for i,Dsurf in enumerate(surfs):
                D[i,-1] = 1 #constant term.
                for j,DE in enumerate(descriptor_dict[Dsurf]):
                    D[i,j] = float(DE)
        #find initial guess for the "coefficient" matrix by solving the
        #unconstrained least-squares problem Dc=A using psuedo-inverse
        #of D (note that this is not efficient, but it doesn't matter
        #for such small matrices)
        D = np.array(D)
        A = np.array(A)
        if len(A) > 1:
            Dinv = np.linalg.pinv(D)
            c0 = np.dot(Dinv,A)
            #use relaxation method to solve the problem subject to the
            #constraints specified by coeff_mins/Maxs.
            cMin = coeff_mins[Nads]
            cMax = coeff_maxs[Nads]
            c = constrained_relaxation(
                D,A,c0,cMin,cMax)
        elif coeff_mins[Nads] == coeff_maxs[Nads]:
            c = coeff_mins[Nads]
        elif A:
            # NOTE(review): with a single energy of exactly 0.0 this numpy
            # truthiness test is False and control falls through to the
            # "no data" branch below -- confirm that is intended.
            #If there is only one data point, assume constant.
            warnings.warn('Assuming constant value for: '+ads)
            # NOTE(review): `i` here leaks from the Dtotal/D loops above; it is
            # only used via len(Dtotal[i,:]), i.e. the column count, so any
            # valid row index gives the same result.
            c = [0]*len(Dtotal[i,:])
            c[-1] = A[0]
        else:
            warnings.warn('No data found for : '+ads+', assuming scaling parameters are 0.')
            c = [0]*len(Dtotal[i,:])
        for Ndesc,coeff in enumerate(c):
            C[Ndesc,Nads] = np.round(coeff,5)
        if return_error_dict == True:
            # Residuals of the (possibly constrained) fit, per surface.
            err = np.dot(D,c) - A
            for surf,errVal in zip(surfs,err):
                index = surface_names.index(surf)
                error_dict[ads][index] = np.round(errVal,6)
    if return_error_dict == True:
        return C, error_dict
    else:
        return C
def linear_regression(x, y, constrain_slope=None):
    """
    Least-squares line fit returning (slope, intercept).

    :param x: x-coordinates.
    :type x: array_like
    :param y: y-coordinates.
    :type y: array_like
    :param constrain_slope: if given, fix the slope to this value and fit
        only the intercept (the mean residual).
    :type constrain_slope: float, optional
    """
    if constrain_slope is not None:
        slope = float(constrain_slope)
        intercept = sum([yi - slope*xi for xi, yi in zip(x, y)])/len(x)
    else:
        slope, intercept = catmap.plt.polyfit(x, y, 1)
    return slope, intercept
def match_regex(string, regex, group_names):
    """
    Match `regex` against the start of `string` (re.match semantics).

    :param string: String to match against.
    :type string: str
    :param regex: Regular expression with capture groups.
    :type regex: str
    :param group_names: Names paired positionally with the captured groups.
    :type group_names: list
    :returns: dict mapping each group name to its captured text, or None
        when the pattern does not match.
    """
    matched = re.match(regex, string)
    if not matched:
        return None
    # zip() truncates to the shorter of names/groups, as the original did.
    return dict(zip(group_names, matched.groups()))
def numerical_jacobian(f, x, matrix, h = 1e-10, diff_idxs=None):
    """
    Calculate the Jacobian matrix of a function at the point x0.
    This is the first derivative of a vectorial function:

        f : R^m -> R^n with m >= n

    Hacked from mpmath.calculus.optimize

    :param f: vector-valued function of a vector argument.
    :type f: callable
    :param x: point at which to evaluate the Jacobian.
    :param matrix: vector/matrix constructor; called as matrix(seq) for
        vectors and matrix(m, n) for an m-by-n zero matrix (e.g. mpmath.matrix).
    :param h: finite-difference step scale; the step for component j is
        max(|h*x[j]|, h) so very small components still get a usable step.
    :type h: float, optional
    :param diff_idxs: indices of x to differentiate with respect to;
        defaults to all of them (columns not listed remain zero).
    """
    x = matrix(x)
    fx = matrix(f(x))
    m = len(fx)
    n = len(x)
    J = matrix(m, n)
    if not diff_idxs:
        # range() instead of the Python-2-only xrange(); iteration behaviour
        # is identical and this now works under Python 3 as well.
        diff_idxs = range(n)
    for j in diff_idxs:
        xj = x.copy()
        delta = abs(h*xj[j])
        # Using a delta proportional to xj is more stable for very small numbers.
        delta = max(delta, h)
        xj[j] += delta
        # Forward-difference column of the Jacobian.
        Jj = (matrix(f(xj)) - fx)/(delta)
        for i in range(m):
            J[i,j] = Jj[i]
    return J
def smooth_piecewise_linear(theta_tot, slope=1, cutoff=0.25, smoothing=0.05):
    """
    Piecewise linear response, smoothed quadratically around the cutoff.

    Returns the tuple (c_0, dC, d2C): the coverage-normalised value and its
    first and second derivatives with respect to total coverage.

    :param theta_tot: total coverage.
    :param slope: slope of the linear regime.
    :type slope: float, optional
    :param cutoff: coverage where the linear regime begins.
    :type cutoff: float, optional
    :param smoothing: half-width of the smoothed transition region.
    :type smoothing: float, optional
    """
    lower = cutoff - smoothing
    upper = cutoff + smoothing
    if theta_tot <= lower:
        # Below the transition window: no interaction at all.
        return 0, 0, 0
    if theta_tot <= upper:
        # Quadratic blending region joining the flat and linear regimes.
        alpha = slope/(2*(upper - lower))
        value = (alpha*(theta_tot - lower)**2)/theta_tot
        first = alpha*(1 - (lower/theta_tot)**2)
        second = (2*alpha*lower**2)/(theta_tot**3)
        return value, first, second
    # Fully linear regime above the transition window.
    value = slope*(theta_tot - cutoff)/theta_tot
    first = slope*(cutoff/(theta_tot**2))
    second = (-2*slope*cutoff)/(theta_tot**3)
    return value, first, second
def offset_smooth_piecewise_linear(theta_tot, slope=1, cutoff=0.25,
                                   smoothing=0.05, offset=0.1):
    """
    Piecewise linear function with an offset. Not equivalent to piecewise
    linear for second-order interactions.

    Delegates to :func:`smooth_piecewise_linear` and shifts only the
    function value; the derivative terms are unchanged by the offset.

    :param theta_tot: Total coverage at which the function is evaluated.
    :type theta_tot: float
    :param slope: Slope of the linear region.
    :type slope: float, optional
    :param cutoff: Coverage at which the linear region would intercept zero.
    :type cutoff: float, optional
    :param smoothing: Half-width of the quadratic smoothing window.
    :type smoothing: float, optional
    :param offset: Constant added to the function value.
    :type offset: float, optional
    :returns: Tuple ``(c_0, dC, d2C)``; ``c_0`` includes the offset.
    """
    # Previous docstring documented a nonexistent 'max_coverage' parameter;
    # the actual tunable is 'slope'.
    c_0, dC, d2C = smooth_piecewise_linear(theta_tot, slope, cutoff, smoothing)
    c_0 += offset
    return c_0, dC, d2C
def add_dict_in_place(dict1, dict2):
    """
    Merge dict2 into dict1, summing values for keys present in both.

    Keys of dict2 missing from dict1 are inserted; for shared keys the
    dict2 value is added (``+=``) to the dict1 value. dict1 is modified
    in place; dict2 is left untouched.

    :param dict1: Dictionary updated in place.
    :type dict1: dict
    :param dict2: Dictionary whose items are merged in.
    :type dict2: dict
    """
    # .items() (rather than the Python-2-only .iteritems()) works on both
    # Python 2 and Python 3.
    for key, value in dict2.items():
        if key in dict1:
            dict1[key] += value
        else:
            dict1[key] = value
| ajmedford/catmap | catmap/functions.py | Python | gpl-3.0 | 18,752 | [
"ASE"
] | c42f7fbaa1be25549bc2da94e3dba9485d92a3f504d92f3855348db62f6a0e9c |
# -*- coding: utf-8 -*-
#
# Copyright © 2008 Red Hat, Inc.
# Copyright © 2008 Ricky Zhou
# Copyright © 2012 Patrick Uiterwijk
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program;
# if not, write to the Free Software Foundation, Inc., 51 Franklin Street,
# Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
# Author(s): Toshio Kuratomi <tkuratom@redhat.com>
# Ricky Zhou <ricky@fedoraproject.org>
# Patrick Uiterwijk <puiterwijk@fedoraproject.org>
#
'''
Model for the Fedora Account System
'''
from datetime import datetime
import pytz
from turbogears.database import metadata, mapper, get_engine, session
from turbogears import identity, config
import turbogears
from sqlalchemy import Table, Column, ForeignKey, Sequence
from sqlalchemy import String, Integer, DateTime, Boolean
from sqlalchemy import and_, select, literal_column
from sqlalchemy.orm import relation
from sqlalchemy.exc import InvalidRequestError
# A few sqlalchemy tricks:
# Allow viewing foreign key relations as a dictionary
from sqlalchemy.orm.collections import attribute_mapped_collection
# Allow us to reference the remote table of a many:many as a simple list
from sqlalchemy.ext.associationproxy import association_proxy
from fedora.client import DictContainer
from fedora.tg.json import SABase
import fas
from fas import SHARE_CC_GROUP, SHARE_LOC_GROUP
# Bind us to the database defined in the config file.
get_engine()

#
# Tables Mapped from the DB
#
# autoload=True reflects the column definitions from the live database
# schema at import time instead of declaring them here.

PeopleTable = Table('people', metadata, autoload=True)
PersonRolesTable = Table('person_roles', metadata, autoload=True)
ConfigsTable = Table('configs', metadata, autoload=True)
GroupsTable = Table('groups', metadata, autoload=True)
BugzillaQueueTable = Table('bugzilla_queue', metadata, autoload=True)
LogTable = Table('log', metadata, autoload=True)
RequestsTable = Table('requests', metadata, autoload=True)
SessionTable = Table('session', metadata, autoload=True)

#
# Selects for filtering roles
#
# Sub-selects of person_roles restricted to approved/unapproved rows;
# mapped onto the ApprovedRoles/UnApprovedRoles classes at module bottom.

ApprovedRolesSelect = PersonRolesTable.select(and_(
    PeopleTable.c.id==PersonRolesTable.c.person_id,
    PersonRolesTable.c.role_status=='approved')).alias('approved')
UnApprovedRolesSelect = PersonRolesTable.select(and_(
    PeopleTable.c.id==PersonRolesTable.c.person_id,
    PersonRolesTable.c.role_status!='approved')).alias('unapproved')

# The identity schema -- These must follow some conventions that TG
# understands and are shared with other Fedora services via the python-fedora
# module.
visits_table = Table('visit', metadata,
    Column('visit_key', String(40), primary_key=True),
    # NOTE(review): this default is evaluated once at import time, so every
    # row created by this process gets the same 'created' timestamp.  A
    # callable default was probably intended -- confirm before changing.
    Column('created', DateTime, nullable=False, default=datetime.now(pytz.utc)),
    Column('expiry', DateTime)
)

visit_identity_table = Table('visit_identity', metadata,
    Column('visit_key', String(40), ForeignKey('visit.visit_key'),
        primary_key=True),
    Column('user_id', Integer, ForeignKey('people.id'), index=True),
    Column('ssl', Boolean)
)

serial_seq = Sequence('serial_seq')

#
# Mapped Classes
#

# Group names used for access-control checks below; overridable in the
# application config file.
admin_group = config.get('admingroup', 'accounts')
system_group = config.get('systemgroup', 'fas-system')
thirdparty_group = config.get('thirdpartygroup', 'thirdparty')
class People(SABase):
    '''Records for all the contributors to Fedora.'''

    # Map the people fields that various classes of users are allowed to
    # retrieve.  filter_private() below selects one of these tuples based
    # on who is asking.
    allow_fields = {
        # This is the complete list of fields
        'complete': ('id', 'username', 'human_name', 'gpg_keyid', 'ssh_key',
            'password', 'passwordtoken', 'password_changed', 'email',
            'emailtoken', 'unverified_email', 'comments', 'postal_address',
            'telephone', 'facsimile', 'affiliation', 'certificate_serial',
            'creation', 'internal_comments', 'ircnick', 'last_seen', 'status',
            'status_change', 'locale', 'timezone', 'latitude', 'longitude',
            'country_code', 'privacy', 'old_password', 'alias_enabled',
            'security_question', 'security_answer'),
        # Full disclosure to admins
        'admin': ('id', 'username', 'human_name', 'gpg_keyid', 'ssh_key',
            'password', 'passwordtoken', 'password_changed', 'email',
            'emailtoken', 'unverified_email', 'comments', 'postal_address',
            'telephone', 'facsimile', 'affiliation', 'certificate_serial',
            'creation', 'internal_comments', 'ircnick', 'last_seen', 'status',
            'status_change', 'locale', 'timezone', 'latitude', 'longitude',
            'country_code', 'privacy', 'old_password', 'alias_enabled',
            'security_question', 'security_answer'),
        # Full disclosure to systems group
        'systems': ('id', 'username', 'human_name',
            'gpg_keyid', 'ssh_key', 'password', 'passwordtoken',
            'password_changed', 'email', 'emailtoken', 'unverified_email',
            'comments', 'postal_address', 'telephone', 'facsimile',
            'affiliation', 'certificate_serial', 'creation',
            'internal_comments', 'ircnick', 'last_seen', 'status',
            'status_change', 'locale', 'timezone', 'latitude',
            'longitude', 'country_code', 'privacy', 'old_password',
            'alias_enabled', 'security_question', 'security_answer'),
        # thirdparty gets the results of privacy and ssh_key in addition
        'thirdparty': ('ssh_key',),
        'self': ('id', 'username', 'human_name', 'gpg_keyid', 'ssh_key',
            'password', 'password_changed', 'email', 'unverified_email',
            'comments', 'postal_address', 'telephone', 'facsimile',
            'affiliation', 'certificate_serial', 'creation', 'ircnick',
            'last_seen', 'status', 'status_change', 'locale', 'timezone',
            'latitude', 'longitude', 'country_code', 'privacy',
            'old_password', 'security_question'),
        'public': ('id', 'username', 'human_name', 'gpg_keyid', 'email',
            'comments', 'affiliation', 'certificate_serial', 'creation',
            'last_seen', 'status', 'status_change', 'locale', 'ircnick',
            'timezone', 'latitude', 'longitude', 'country_code',
            'privacy'),
        'privacy': ('id', 'username', 'email', 'comments',
            'certificate_serial', 'creation', 'last_seen', 'status',
            'status_change', 'privacy'),
        'anonymous': ('id', 'username', 'comments', 'creation', 'privacy'),
        }

    @classmethod
    def by_id(cls, id):
        '''
        A class method that can be used to search users
        based on their unique id
        '''
        # .one() raises if there is no (or more than one) match.
        return cls.query.filter_by(id=id).one()

    @classmethod
    def by_email_address(cls, email):
        '''
        A class method that can be used to search users
        based on their email addresses since it is unique.
        '''
        return cls.query.filter_by(email=email).one()

    @classmethod
    def by_username(cls, username):
        '''
        A class method that permits to search users
        based on their username attribute.
        '''
        return cls.query.filter_by(username=username).one()

    # NOTE: the membership-management methods below are instance methods
    # whose first argument is (unconventionally) named 'cls' -- it is
    # really self.  '_' is the gettext translation builtin injected by
    # TurboGears.

    # If we're going to do logging here, we'll have to pass the person that did the applying.
    def apply(cls, group, requester):
        '''
        Apply a person to a group
        '''
        if group in cls.memberships:
            raise fas.ApplyError, _('user is already in this group')
        else:
            # A new role starts unapproved; a sponsor must approve it later.
            role = PersonRoles()
            role.role_status = 'unapproved'
            role.role_type = 'user'
            role.member = cls
            role.group = group

    def upgrade(cls, group, requester):
        '''
        Upgrade a user in a group - requester for logging purposes
        '''
        # Promotion ladder: user -> sponsor -> administrator.
        if not group in cls.memberships:
            raise fas.UpgradeError, _('user is not a member')
        else:
            role = PersonRoles.query.filter_by(member=cls, group=group).one()
            if role.role_type == 'administrator':
                raise fas.UpgradeError, _('administrators cannot be upgraded any further')
            elif role.role_type == 'sponsor':
                role.role_type = 'administrator'
            elif role.role_type == 'user':
                role.role_type = 'sponsor'

    def downgrade(cls, group, requester):
        '''
        Downgrade a user in a group - requester for logging purposes
        '''
        # Demotion ladder: administrator -> sponsor -> user.
        if not group in cls.memberships:
            raise fas.DowngradeError, _('user is not a member')
        else:
            role = PersonRoles.query.filter_by(member=cls, group=group).one()
            if role.role_type == 'user':
                raise fas.DowngradeError, _('users cannot be downgraded any further')
            elif role.role_type == 'sponsor':
                role.role_type = 'user'
            elif role.role_type == 'administrator':
                role.role_type = 'sponsor'

    def sponsor(cls, group, requester):
        '''Approve this person's pending membership in a group.'''
        # If we want to do logging, this might be the place.
        if not group in cls.unapproved_memberships:
            raise fas.SponsorError, _('user is not an unapproved member')
        role = PersonRoles.query.filter_by(member=cls, group=group).one()
        role.role_status = 'approved'
        role.sponsor = requester
        role.approval = datetime.now(pytz.utc)
        # Approval in one group may cascade into others (auto_approve_groups).
        cls._handle_auto_add(group, requester)

    def _handle_auto_add(cls, group, requester):
        """
        Handle automatic group approvals
        """
        # Config format: 'groupA:autoX,autoY|groupB:autoZ' -- approval in
        # groupA automatically approves membership in autoX and autoY.
        auto_approve_groups = config.get('auto_approve_groups')
        associations = auto_approve_groups.split('|')
        approve_group_queue = []
        for association in associations:
            (groupname, approve_groups) = association.split(':', 1)
            if groupname == group.name:
                approve_group_queue.extend(approve_groups.split(','))
        for groupname in approve_group_queue:
            approve_group = Groups.by_name(groupname)
            cls._auto_add(approve_group, requester)

    def _auto_add(cls, group, requester):
        """
        Ensure that a person is approved in a group
        """
        try:
            # Already has a role: just force it to approved.
            role = PersonRoles.query.filter_by(member=cls, group=group).one()
            if role.role_status != 'approved':
                role.role_status = 'approved'
                role.sponsor = requester
                role.approval = datetime.now(pytz.utc)
        except InvalidRequestError:
            # .one() found no row: create a fresh, pre-approved 'user' role.
            role = PersonRoles()
            role.member = cls
            role.group = group
            role.role_type = 'user'
            role.sponsor = requester
            role.role_status = 'approved'
            role.approval = datetime.now(pytz.utc)

    def remove(cls, group, requester):
        '''Remove this person's role in a group entirely.'''
        if not group in cls.memberships:
            raise fas.RemoveError, _('user is not a member')
        else:
            role = PersonRoles.query.filter_by(member=cls, group=group).one()
            session.delete(role)

    def set_share_cc(self, value):
        '''Toggle membership in the share-country-code group.'''
        share_cc_group = Groups.by_name(SHARE_CC_GROUP)
        if value:
            # Apply and immediately self-sponsor; ignore "already a member".
            try:
                self.apply(share_cc_group, self)
                self.sponsor(share_cc_group, self)
            except fas.ApplyError:
                pass
            except fas.SponsorError:
                pass
        else:
            try:
                self.remove(share_cc_group, self)
            except fas.SponsorError:
                pass

    def get_share_cc(self):
        '''True if this person is in the share-country-code group.'''
        return Groups.by_name(SHARE_CC_GROUP) in self.memberships

    def set_share_loc(self, value):
        '''Toggle membership in the share-location group.'''
        share_loc_group = Groups.by_name(SHARE_LOC_GROUP)
        if value:
            try:
                self.apply(share_loc_group, self)
                self.sponsor(share_loc_group, self)
            except fas.ApplyError:
                pass
            except fas.SponsorError:
                pass
        else:
            try:
                self.remove(share_loc_group, self)
            except fas.SponsorError:
                pass

    def get_share_loc(self):
        '''True if this person is in the share-location group.'''
        return Groups.by_name(SHARE_LOC_GROUP) in self.memberships

    def filter_private(self, user='public'):
        '''Filter out data marked private unless the user is authorized.

        Some data in this class can only be released if the user has not asked
        for it to be private.  Calling this method will filter the information
        out so it doesn't go anywhere.

        This method will disconnect the data structure from being persisted in
        the database and then remove the information that the user should not
        be allowed to see.

        If it's an admin, then all data will be returned.  If it's
        anything else, parts of the information will be removed.

        Note that it is not foolproof.  For instance, a template could be
        written that traverses from people to groups to a different person
        and retrieves information from there.  However, this would not be a
        standard use of this method so we should know when we're doing
        non-standard things and filter the data there as well.
        '''
        person_data = DictContainer()
        # NOTE(review): the bare except below is a deliberate best-effort
        # fallback to the caller-supplied access level when no TurboGears
        # identity/request is available (e.g. called from a shell).
        try:
            # Determine the viewer's access level from the current identity.
            if identity.in_any_group(admin_group, system_group):
                # Admin and system are the same for now
                user = 'admin'
            elif identity.current.user_name == self.username:
                user = 'self'
            elif identity.current.anonymous:
                user = 'anonymous'
            elif self.privacy:
                user = 'privacy'
            else:
                user = 'public'
            for field in self.allow_fields[user]:
                person_data[field] = self.__dict__[field]
            # thirdparty users need to get some things so that users can login to
            # their boxes.
            if identity.in_group(thirdparty_group):
                for field in self.allow_fields['thirdparty']:
                    person_data[field] = self.__dict__[field]
        except:
            # Typically this exception means this was called by shell
            for field in self.allow_fields[user]:
                person_data[field] = self.__dict__[field]
        # Instead of None password fields, we set it to '*' for easier fasClient
        # parsing
        if 'password' not in person_data:
            person_data['password'] = '*'
        # Make sure we have empty fields for the rest of the info
        for field in self.allow_fields['complete']:
            if field not in person_data:
                person_data[field] = None
        person_data['group_roles'] = {}
        for field in self.roles:
            person_data['group_roles'][field.groupname] = field
        person_data['memberships'] = list(self.memberships)
        person_data['roles'] = self.roles
        return person_data

    def __repr__(cls):
        return "User(%s,%s)" % (cls.username, cls.human_name)

    # Shortcuts through the PersonRoles association: person.memberships is
    # the list of Groups reachable via person.roles, etc.
    memberships = association_proxy('roles', 'group')
    approved_memberships = association_proxy('approved_roles', 'group')
    unapproved_memberships = association_proxy('unapproved_roles', 'group')
class PersonRoles(SABase):
    '''Record people that are members of groups.'''

    def __repr__(cls):
        # NOTE: first arg is conventionally 'self'; 'cls' here is just a name.
        return "PersonRole(%s,%s,%s,%s)" % (cls.member.username, cls.group.name, cls.role_type, cls.role_status)

    # Shortcut: role.groupname -> role.group.name
    groupname = association_proxy('group', 'name')
class Configs(SABase):
    '''Configs for applications that a Fedora Contributor uses.'''
    # Columns are reflected from ConfigsTable and mapped at module bottom.
    pass
class Groups(SABase):
    '''Group that people can belong to.'''

    @classmethod
    def by_id(cls, group_id):
        '''
        A class method that can be used to search groups
        based on their unique id
        '''
        return cls.query.filter_by(id=group_id).one()

    @classmethod
    def by_email_address(cls, email):
        '''
        A class method that can be used to search groups
        based on their email addresses since it is unique.
        '''
        return cls.query.filter_by(email=email).one()

    @classmethod
    def by_type(cls, grptype):
        '''
        A class method that permits to search groups
        based on their type attribute.
        '''
        # Unlike the lookups above, returns a (possibly empty) list.
        return cls.query.filter_by(group_type=grptype).all()

    @classmethod
    def by_name(cls, name):
        '''
        A class method that permits to search groups
        based on their name attribute.
        '''
        return cls.query.filter_by(name=name).one()

    def delete(cls):
        '''Delete this group and all of its membership roles.'''
        # NOTE: instance method; first arg named 'cls' is really self.
        for role in cls.roles:
            session.delete(role)
        session.delete(cls)

    def __repr__(cls):
        return "Groups(%s,%s)" % (cls.name, cls.display_name)

    # People in the group
    people = association_proxy('roles', 'member')

    def __json__(self):
        '''We want to make sure we keep a tight rein on sensitive information.
        Thus we strip out certain information unless a user is an admin.

        Current access restrictions
        ===========================

        Anonymous users can see:
            :id: The id in the account system and on the shell servers
            :name: Username in FAS
            :display_name: Human name of the person
            :group_type: The type of group
            :needs_sponsor: Whether the group requires a sponsor or not
            :user_can_remove: Whether users can remove themselves from the group
            :creation: Date this group was created
            :joinmsg: The join message for the group
            :prequisite_id: The prerequisite for the group
            :owner_id: The owner of the group

        Authenticated Users add:
            :email: The group email address

        Admins gets access to this final field as well:
            :unverified_email: An unverified email
            :email_token: The token for setting an email
        '''
        props = super(Groups, self).__json__()
        # These columns no longer exist, but here's an example of restricting info.
        #if identity.current.anonymous:
        #    # Anonymous users can't see any of these
        #    del props['email']
        #if not identity.in_group('fas-system'):
        #    if not identity.in_group('accounts'):
        #        # Only admins can see internal_comments
        #        del props['unverified_email']
        #        del props['emailtoken']
        return props
class BugzillaQueue(SABase):
    '''Queued up changes that need to be applied to bugzilla.'''

    def __repr__(cls):
        # NOTE: first arg is conventionally 'self'; 'cls' here is just a name.
        return "BugzillaQueue(%s,%s,%s,%s)" % (cls.person.username, cls.email, cls.group.name, cls.action)
class Log(SABase):
    '''Write simple logs of changes to the database.'''
    # Columns are reflected from LogTable and mapped at module bottom.
    pass
class Requests(SABase):
    '''
    Requests for certain resources may be restricted based on the user or host.
    '''
    # Columns are reflected from RequestsTable and mapped at module bottom.
    pass
#
# Classes for mapping arbitrary selectables (This is similar to a view in
# python rather than in the db
#
class ApprovedRoles(PersonRoles):
    '''Only display roles that are approved.'''
    # Mapped onto ApprovedRolesSelect, a filtered view of person_roles.
    pass
class UnApprovedRoles(PersonRoles):
    '''Only show Roles that are not approved.'''
    # Mapped onto UnApprovedRolesSelect, a filtered view of person_roles.
    pass
#
# Classes for the SQLAlchemy Visit Manager
#
class Visit(SABase):
    '''Track how many people are visiting the website.

    It doesn't currently make sense for us to track this here so we clear this
    table of stale records every hour.
    '''

    @classmethod
    def lookup_visit(cls, visit_key):
        '''Return the Visit for visit_key, or None if the key is unknown.'''
        return cls.query.get(visit_key)
class VisitIdentity(SABase):
    '''Associate a user with a visit cookie.

    This allows users to log in to app.
    '''
    # Columns come from visit_identity_table; mapped at module bottom.
    pass
class Session(SABase):
    '''Session'''
    # Columns are reflected from SessionTable and mapped at module bottom.
    pass
#
# set up mappers between tables and classes
#

mapper(Session, SessionTable)

#
# mappers for filtering roles
#
# ApprovedRoles/UnApprovedRoles map onto the filtered selects defined near
# the top of the module, so each behaves like a view of person_roles.

mapper(ApprovedRoles, ApprovedRolesSelect, properties = {
    'group': relation(Groups, backref='approved_roles', lazy = False)
    })
mapper(UnApprovedRoles, UnApprovedRolesSelect, properties = {
    'group': relation(Groups, backref='unapproved_roles', lazy = False)
    })

#
# General Mappers
#

mapper(People, PeopleTable, properties = {
    # This name is kind of confusing.  It's to allow
    # person.group_roles['groupname'] in order to make auth.py (hopefully)
    # slightly faster.
    'group_roles': relation(PersonRoles,
        collection_class = attribute_mapped_collection('groupname'),
        primaryjoin = PeopleTable.c.id==PersonRolesTable.c.person_id),
    'approved_roles': relation(ApprovedRoles, backref='member',
        primaryjoin = PeopleTable.c.id==ApprovedRoles.person_id),
    'unapproved_roles': relation(UnApprovedRoles, backref='member',
        primaryjoin = PeopleTable.c.id==UnApprovedRoles.person_id),
    'roles': relation(PersonRoles, backref='member',
        primaryjoin = PersonRolesTable.c.person_id==PeopleTable.c.id)
    })
mapper(PersonRoles, PersonRolesTable, properties = {
    'group': relation(Groups, backref='roles', lazy = False,
        primaryjoin=PersonRolesTable.c.group_id==GroupsTable.c.id),
    'sponsor': relation(People, uselist=False,
        primaryjoin = PersonRolesTable.c.sponsor_id==PeopleTable.c.id)
    })
mapper(Configs, ConfigsTable, properties = {
    'person': relation(People, backref = 'configs')
    })
mapper(Groups, GroupsTable, properties = {
    'owner': relation(People, uselist=False,
        primaryjoin = GroupsTable.c.owner_id==PeopleTable.c.id),
    # Self-referential: a group may require membership in another group.
    'prerequisite': relation(Groups, uselist=False,
        remote_side=[GroupsTable.c.id],
        primaryjoin = GroupsTable.c.prerequisite_id==GroupsTable.c.id)
    })
mapper(BugzillaQueue, BugzillaQueueTable, properties = {
    'group': relation(Groups, lazy = False, backref = 'pending'),
    'person': relation(People, lazy = False, backref = 'pending'),
    ### TODO: test to be sure SQLAlchemy only loads the backref on demand
    'author': relation(People, backref='changes')
    })
mapper(Requests, RequestsTable, properties = {
    'person': relation(People, backref='requests')
    })
mapper(Log, LogTable)

# TurboGears Identity
mapper(Visit, visits_table)
mapper(VisitIdentity, visit_identity_table,
    properties=dict(users=relation(People, backref='visit_identity')))
| bstinsonmhk/fas | fas/model/fasmodel.py | Python | gpl-2.0 | 22,810 | [
"VisIt"
] | afda160b4b05d2cbe696aea3d7e92c2eba15a9796e87341dc94a34b65087673e |
"""
Two-dimensional pattern generators drawing from various random distributions.
"""
__version__='$Revision$'
import numpy
from numpy import zeros,floor,where,choose,less,greater
import param
from param.parameterized import ParamOverrides
from .patterngenerator import PatternGenerator
from . import Composite, Gaussian
from .sheetcoords import SheetCoordinateSystem
def seed(seed=None):
    """
    Set the seed on the shared RandomState instance.

    Convenience shortcut for RandomGenerator.random_generator.seed();
    affects every pattern that uses the shared generator.
    """
    shared_state = RandomGenerator.random_generator
    shared_state.seed(seed)
class RandomGenerator(PatternGenerator):
    """2D random noise pattern generator abstract class."""

    __abstract = True

    # The orientation is ignored, so we don't show it in
    # auto-generated lists of parameters (e.g. in the GUI)
    orientation = param.Number(precedence=-1)

    random_generator = param.Parameter(
        default=numpy.random.RandomState(seed=(500,500)),precedence=-1,doc=
        """
        numpy's RandomState provides methods for generating random
        numbers (see RandomState's help for more information).

        Note that all instances will share this RandomState object,
        and hence its state. To create a RandomGenerator that has its
        own state, set this parameter to a new RandomState instance.
        """)

    def _distrib(self,shape,p):
        """Method for subclasses to override with a particular random distribution."""
        raise NotImplementedError

    # Optimization: We use a simpler __call__ method here to skip the
    # coordinate transformations (which would have no effect anyway)
    def __call__(self,**params_to_override):
        # Merge call-time overrides onto this instance's parameter values.
        p = ParamOverrides(self,params_to_override)
        # Pixel array shape implied by the requested bounds and densities.
        shape = SheetCoordinateSystem(p.bounds,p.xdensity,p.ydensity).shape

        result = self._distrib(shape,p)
        self._apply_mask(p,result)

        # output_fns transform the result array in place.
        for of in p.output_fns:
            of(result)

        return result
class UniformRandom(RandomGenerator):
    """2D uniform random noise pattern generator."""

    def _distrib(self,shape,p):
        # Each pixel is an independent draw from U[offset, offset+scale).
        low = p.offset
        high = p.offset + p.scale
        return p.random_generator.uniform(low, high, shape)
class BinaryUniformRandom(RandomGenerator):
    """
    2D binary uniform random noise pattern generator.

    Generates an array whose entries (before scaling) are 1.0 with the
    given on_probability and 0.0 otherwise, then scales it and adds the
    offset as for other patterns.  For the default scale and offset, the
    result is a binary mask where some elements are on at random.
    """

    on_probability = param.Number(default=0.5,bounds=[0.0,1.0],doc="""
        Probability (in the range 0.0 to 1.0) that the binary value
        (before scaling) is on rather than off (1.0 rather than 0.0).""")

    def _distrib(self,shape,p):
        # Shift U[0,1) downward so that rounding yields 1 exactly with
        # probability on_probability, then apply scale and offset.
        low = p.on_probability - 0.5
        draws = p.random_generator.uniform(low, low + 1.0, shape)
        return p.offset + p.scale * draws.round()
class GaussianRandom(RandomGenerator):
    """
    2D Gaussian random noise pattern generator.

    Each pixel is an independent draw from a zero-mean, unit-variance
    normal distribution, multiplied by the given scale and shifted by
    the given offset.
    """

    scale = param.Number(default=0.25,softbounds=(0.0,2.0))
    offset = param.Number(default=0.50,softbounds=(-2.0,2.0))

    def _distrib(self,shape,p):
        gaussian_noise = p.random_generator.standard_normal(shape)
        return p.offset + p.scale * gaussian_noise
# CEBALERT: in e.g. script_repr, an instance of this class appears to
# have only pattern.Constant() in its list of generators, which might
# be confusing. The Constant pattern has no effect because the
# generators list is overridden in __call__. Shouldn't the generators
# parameter be hidden for this class (and possibly for others based on
# pattern.Composite)? For that to be safe, we'd at least have to have
# a warning if someone ever sets a hidden parameter, so that having it
# revert to the default value would always be ok.
class GaussianCloud(Composite):
    """Uniform random noise masked by a circular Gaussian."""

    operator = param.Parameter(numpy.multiply)

    gaussian_size = param.Number(default=1.0,doc="Size of the Gaussian pattern.")

    aspect_ratio = param.Number(default=1.0,bounds=(0.0,None),softbounds=(0.0,2.0),
        precedence=0.31,doc="""
        Ratio of gaussian width to height; width is gaussian_size*aspect_ratio.""")

    def __call__(self,**params_to_override):
        p = ParamOverrides(self,params_to_override)
        # Combine (via the multiply operator above) a Gaussian envelope
        # with uniform random noise.
        envelope = Gaussian(aspect_ratio=p.aspect_ratio,size=p.gaussian_size)
        p.generators = [envelope, UniformRandom()]
        return super(GaussianCloud,self).__call__(**p)
### JABHACKALERT: This code seems to work fine when the input regions
### are all the same size and shape, but for
### e.g. examples/hierarchical.ty the resulting images in the Test
### Pattern preview window are square (instead of the actual
### rectangular shapes), matching between the eyes (instead of the
### actual two different rectangles), and with dot sizes that don't
### match between the eyes. It's not clear why this happens.
class RandomDotStereogram(PatternGenerator):
    """
    Random dot stereogram using rectangular black and white patches.

    Based on Matlab code originally from Jenny Read, reimplemented
    in Python by Tikesh Ramtohul (2006).
    """

    # Suppress unused parameters
    x = param.Number(precedence=-1)
    y = param.Number(precedence=-1)
    size = param.Number(precedence=-1)
    orientation = param.Number(precedence=-1)

    # Override defaults to make them appropriate
    scale = param.Number(default=0.5)
    offset = param.Number(default=0.5)

    # New parameters for this pattern
    #JABALERT: Should rename xdisparity and ydisparity to x and y, and simply
    #set them to different values for each pattern to get disparity
    xdisparity = param.Number(default=0.0,bounds=(-1.0,1.0),softbounds=(-0.5,0.5),
        precedence=0.50,doc="Disparity in the horizontal direction.")

    ydisparity = param.Number(default=0.0,bounds=(-1.0,1.0),softbounds=(-0.5,0.5),
        precedence=0.51,doc="Disparity in the vertical direction.")

    dotdensity = param.Number(default=0.5,bounds=(0.0,None),softbounds=(0.1,0.9),
        precedence=0.52,doc="Number of dots per unit area; 0.5=50% coverage.")

    dotsize = param.Number(default=0.1,bounds=(0.0,None),softbounds=(0.05,0.15),
        precedence=0.53,doc="Edge length of each square dot.")

    random_seed = param.Integer(default=500,bounds=(0,1000),
        precedence=0.54,doc="Seed value for the random position of the dots.")

    def __call__(self,**params_to_override):
        """
        Draw random square dots on a double-size canvas, then crop out the
        central region shifted by (xdisparity, ydisparity).
        """
        p = ParamOverrides(self,params_to_override)

        xsize,ysize = SheetCoordinateSystem(p.bounds,p.xdensity,p.ydensity).shape
        xsize,ysize = int(round(xsize)),int(round(ysize))

        # Convert Sheet-coordinate parameters to pixel units.
        xdisparity = int(round(xsize*p.xdisparity))
        ydisparity = int(round(xsize*p.ydisparity))
        dotsize    = int(round(xsize*p.dotsize))

        bigxsize = 2*xsize
        bigysize = 2*ysize
        ndots=int(round(p.dotdensity * (bigxsize+2*dotsize) * (bigysize+2*dotsize) /
                        min(dotsize,xsize) / min(dotsize,ysize)))
        # Integer half-dot.  // keeps this exact on Python 3 as well;
        # the previous floor(dotsize/2) relied on Python 2 integer division.
        halfdot = dotsize // 2

        # Choose random colors and locations of square dots.
        # NOTE: seeds numpy's *global* RNG, affecting other numpy.random
        # users in this process (preserved from the original design).
        random_seed = p.random_seed

        numpy.random.seed(random_seed*12+random_seed*99)
        col=numpy.where(numpy.random.random((ndots))>=0.5, 1.0, -1.0)

        numpy.random.seed(random_seed*122+random_seed*799)
        xpos=numpy.floor(numpy.random.random((ndots))*(bigxsize+2*dotsize)) - halfdot

        numpy.random.seed(random_seed*1243+random_seed*9349)
        ypos=numpy.floor(numpy.random.random((ndots))*(bigysize+2*dotsize)) - halfdot

        # Construct arrays of points specifying the boundaries of each
        # dot, cropping them by the big image size (0,0) to (bigxsize,bigysize).
        # astype(int) replaces the numpy.int alias removed in NumPy 1.24.
        x1=xpos.astype(int) ; x1=choose(less(x1,0),(x1,0))
        y1=ypos.astype(int) ; y1=choose(less(y1,0),(y1,0))
        x2=(xpos+(dotsize-1)).astype(int) ; x2=choose(greater(x2,bigxsize),(x2,bigxsize))
        y2=(ypos+(dotsize-1)).astype(int) ; y2=choose(greater(y2,bigysize),(y2,bigysize))

        # Draw each dot in the big image, on a blank background
        bigimage = zeros((bigysize,bigxsize))
        for i in range(ndots):
            bigimage[y1[i]:y2[i]+1,x1[i]:x2[i]+1] = col[i]

        # Crop the central region, shifted by the disparity.  // keeps the
        # slice indices integral under Python 3 (same values as the old
        # Python 2 integer division for these non-negative ints).
        result = p.offset + p.scale*bigimage[ (ysize//2)+ydisparity:(3*ysize//2)+ydisparity ,
                                              (xsize//2)+xdisparity:(3*xsize//2)+xdisparity ]

        for of in p.output_fns:
            of(result)

        return result
class DenseNoise(RandomGenerator):
"""
2D Dense noise pattern generator, constrained to a grid.
Similar to UniformRandom, but draws the noise pattern in a grid
that can be smaller than the actual density of the
PatternGenerator.
By default, this produces a matrix with random values 0.0, 0.5, and 1.
When a scale and an offset are provided the transformation maps them to:
0 -> offset
0.5 -> offset + 0.5 * scale
1 -> offset + scale
--------
Examples
--------
DenseNoise(grid_density=1, bounds=BoundingBox(radius=1),
xdensity=4, ydensity=4) will produce something like this:
[[ 1. 1. 1. 1. 0. 0. 0. 0. ]
[ 1. 1. 1. 1. 0. 0. 0. 0. ]
[ 1. 1. 1. 1. 0. 0. 0. 0. ] Here the Sheet-coordinate size is 2.0x2.0,
[ 1. 1. 1. 1. 0. 0. 0. 0. ] so grid_density=1 yields a 2x2 grid
[ 0. 0. 0. 0. 0.5 0.5 0.5 0.5] sampled at 4 units per grid cell
[ 0. 0. 0. 0. 0.5 0.5 0.5 0.5]
[ 0. 0. 0. 0. 0.5 0.5 0.5 0.5]
[ 0. 0. 0. 0. 0.5 0.5 0.5 0.5]])
DenseNoise(grid_density=2, bounds=BoundingBox(radius=1),
xdensity=4, ydensity=4) on the other hand will produce something like:
[[ 1. 1. 0. 0. 0. 0. 0.5 0.5]
[ 1. 1. 0. 0. 0. 0. 0.5 0.5]
[ 1. 1. 1. 1. 0. 0. 0. 0. ] Again the Sheet-coordinate size is 2.0x2.0,
[ 1. 1. 1. 1. 0. 0. 0. 0. ] but grid_density=2 yields a 4x4 grid
[ 0. 0. 0.5 0.5 1. 1. 1. 1. ] with 2 units per grid cell
[ 0. 0. 0.5 0.5 1. 1. 1. 1. ]
[ 1. 1. 0. 0. 1. 1. 1. 1. ]
[ 1. 1. 0. 0. 1. 1. 1. 1. ]]
-----
Notes
-----
1. This method works much faster when the noise matrix falls neatly
into the pixel matrix (~100 times faster).
2. The value of each pixel in the generated pattern is determined
by where the center of that pixel lies in the underlying grid,
regardless of any overlap of that pixel with other grid
squares.
3. If a particular number of cells N is wanted, divide it by the
length of the side of the bounding box to determine the
grid_density. For example, if the user wants to have N=10 cells
for a BoundingBox(radius=1) (which gives a bounding box size of
2.0x2.0), the grid_density must be set to N/2 = 5 in order to
have ten cells.
4. The xdensity and ydensity must both be at least as large as the
grid_density, e.g. 5 for the above example.
"""
grid_density = param.Number(default=10, bounds=(1,None), softbounds=(1,50), doc="""
Grid elements per 1.0 distance in Sheet coordinates.""")
# Hide unused parameters
x = param.Number(precedence=-1)
y = param.Number(precedence=-1)
size = param.Number(precedence=-1)
    def _distrib(self, shape, p):
        """
        Return a `shape`-sized array of grid-constrained three-level noise.

        Each grid cell is randomly assigned 0, 0.5 or 1, and the result is
        returned as ``A * p.scale + p.offset``.
        """
        # Cap grid_density at the coarser of the two pixel densities; warn
        # only once per object (flag attribute set on first occurrence).
        max_density = min(p.xdensity,p.ydensity)
        if (p.grid_density > max_density and not hasattr(self,"warned_about_density")):
            self.warning("Requested grid_density %s larger than xdensity %s or ydensity %s; capped at %s" %
                         (p.grid_density,p.xdensity,p.ydensity,max_density))
            p.grid_density = max_density
            self.warned_about_density=True

        Nx = shape[1]
        Ny = shape[0] # Size of the pixel matrix
        assert (Nx>0 and Ny>0), 'Pixel matrix cannot be zero'

        # Sheet-coordinate extent of the pattern, derived from the pixel
        # step sizes of a SheetCoordinateSystem over the same bounds.
        SC = SheetCoordinateSystem(p.bounds, p.xdensity, p.ydensity)
        unitary_distance_x = SC._SheetCoordinateSystem__xstep
        unitary_distance_y = SC._SheetCoordinateSystem__ystep
        sheet_x_size = unitary_distance_x * Nx
        sheet_y_size = unitary_distance_y * Ny

        # Sizes of the structure matrix
        nx = int(round(sheet_x_size * p.grid_density)) # Number of points in the x's
        ny = int(round(sheet_y_size * p.grid_density)) # Number of points in the y's
        assert ( nx > 0 ), 'Grid density or bound box in the x dimension too small'
        assert ( ny > 0 ), 'Grid density or bound box in the y dimension too small'

        # If the noise grid is proportional to the pixel grid and fits
        # neatly into it then this method is ~100 times faster
        if ( Nx % nx == 0) and (Ny % ny == 0):
            if (Nx == nx) and (Ny == ny): #This is faster to call the whole procedure
                # One noise value per pixel: randint(-1,2) yields -1/0/1,
                # mapped to 0/0.5/1.
                result = 0.5 * (p.random_generator.randint(-1, 2, shape) + 1)
                return result * p.scale + p.offset
            else:
                # This is the actual matrix of the pixels
                A = numpy.zeros(shape)
                # Noise matrix that contains the structure of 0, 0.5, and 1's
                Z = 0.5 * (p.random_generator.randint(-1, 2, (nx, ny)) + 1 )
                ps_x = int(round(Nx * 1.0/ nx)) #Closest integer
                ps_y = int(round(Ny * 1.0/ ny))
                # Noise matrix is mapped to the pixel matrix.
                # NOTE(review): rows are sliced by i (the x index) with
                # extent ps_y and columns by j with ps_x; this is only
                # obviously consistent when the pattern is square
                # (Nx == Ny, nx == ny) -- confirm for anisotropic cases.
                for i in range(nx):
                    for j in range(ny):
                        A[i * ps_y: (i + 1) * ps_y, j * ps_x: (j + 1) * ps_x] = Z[i,j]
                return A * p.scale + p.offset
        # General method in case the noise grid does not
        # fall neatly in the pixel grid
        else:
            # Obtain length of the side and length of the
            # division line between the grid
            x_points,y_points = SC.sheetcoordinates_of_matrixidx()
            # This is the actual matrix of the pixels
            A = numpy.zeros(shape)
            # Noise matrix that contains the structure of 0, 0.5, and 1's
            Z = 0.5 * (p.random_generator.randint(-1, 2, (nx, ny)) + 1 )
            size_of_block_x = Nx * 1.0 / nx
            size_of_block_y = Ny * 1.0 / ny
            # Noise matrix is mapped to the pixel matrix: each pixel takes
            # the value of the grid cell that contains it.
            for i in range(Nx):
                for j in range(Ny):
                    # Map along the x coordinates
                    x_entry = int( i / size_of_block_x)
                    y_entry = int( j / size_of_block_y)
                    A[j][i] = Z[x_entry][y_entry]
            return A * p.scale + p.offset
class SparseNoise(RandomGenerator):
    """
    2D sparse noise pattern generator, with optional constraining to a grid.

    Draws a square pixel of random brightness at a random location,
    either entirely random on the pattern surface or chosen from a
    predefined grid of possible positions.

    By default, produces a matrix with 0.5 everywhere except for a
    square patch in one random location. This value is randomly
    assigned to either 0 or 1, and then is scaled with the parameters
    scale and offset in the following way:

        0   ->  offset
        1   ->  offset + scale

    --------
    Examples
    --------
    SparseNoise(grid_density=1, grid=True, bounds=BoundingBox(radius=1),
    xdensity=4, ydensity=4) will produce something like this:

    [[ 0.5 0.5 0.5 0.5 0.  0.  0.  0. ]
     [ 0.5 0.5 0.5 0.5 0.  0.  0.  0. ]
     [ 0.5 0.5 0.5 0.5 0.  0.  0.  0. ]   Here the Sheet-coordinate size is 2.0x2.0,
     [ 0.5 0.5 0.5 0.5 0.  0.  0.  0. ]   so grid_density=1 yields a 2x2 grid
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]   sampled at 4 units per grid cell, with 0.5
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]   everywhere except the one active cell
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]]

    SparseNoise(grid_density=2, grid=True, bounds=BoundingBox(radius=1),
    xdensity=4, ydensity=4) on the other hand will produce something like:

    [[ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]   Again the Sheet-coordinate size is 2.0x2.0,
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]   but grid_density=2 yields a 4x4 grid
     [ 0.5 0.5 0.5 0.5 0.5 0.5 1.  1. ]   with 2 units per grid cell
     [ 0.5 0.5 0.5 0.5 0.5 0.5 1.  1. ]
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]]

    SparseNoise(grid_density=2, grid=False, bounds=BoundingBox(radius=1),
    xdensity=4, ydensity=4) will produce something like:

    [[ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]   Here notice that the patch is no longer
     [ 0.5 0.5 0.5 0.5 0.5 0.  0.  0.5]   aligned with a fixed grid
     [ 0.5 0.5 0.5 0.5 0.5 0.  0.  0.5]
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]
     [ 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5]]

    -----
    Notes
    -----
    1. This method works ~100 times faster when the noise matrix falls neatly
       into the pixel matrix.

    2. The value of each pixel in the generated pattern is determined
       by where the center of that pixel lies in the underlying grid,
       regardless of any overlap of that pixel with other grid
       squares.

    3. If a particular number of cells N is wanted, divide it by the
       length of the side of the bounding box to determine the
       grid_density. For example, if the user wants to have N=10 cells
       for a BoundingBox(radius=1) (which gives a bounding box size of
       2.0x2.0), the grid_density must be set to N/2 = 5 in order to
       have ten cells.

    4. The xdensity and ydensity must both be at least as large as the
       grid_density, e.g. 5 for the above example.
    """

    # Number of noise-grid cells per 1.0 unit of Sheet coordinates.
    grid_density = param.Number(default=10, bounds=(1,None), softbounds=(1,50), doc="""
        Grid elements per 1.0 distance in Sheet coordinates.""")

    grid = param.Boolean(default=True, doc="""
        If True, each spot is snapped to a grid, so that subsequent
        spots are forced to overlap either entirely or not at all,
        never partially. Otherwise, the spot size is fixed by the
        grid_density, but it may appear anywhere.""")

    # Hide unused parameters inherited from the base class.
    x = param.Number(precedence=-1)
    y = param.Number(precedence=-1)
    size = param.Number(precedence=-1)

    def _distrib(self, shape, p):
        """
        Return a `shape`-sized array that is 0.5 everywhere except one
        randomly-placed square patch of value 0 or 1, returned as
        ``A * p.scale + p.offset``.
        """
        # Cap grid_density at the coarser pixel density; warn only once.
        max_density = min(p.xdensity,p.ydensity)
        if (p.grid_density > max_density and not hasattr(self,"warned_about_density")):
            self.warning("Requested grid_density %s larger than xdensity %s or ydensity %s; capped at %s" %
                         (p.grid_density,p.xdensity,p.ydensity,max_density))
            p.grid_density = max_density
            self.warned_about_density=True

        Nx = shape[1]
        Ny = shape[0] # Size of the pixel matrix
        assert (Nx>0 and Ny>0), 'Pixel matrix cannot be zero'

        SC = SheetCoordinateSystem(p.bounds, p.xdensity, p.ydensity)
        unitary_distance_x = SC._SheetCoordinateSystem__xstep
        unitary_distance_y = SC._SheetCoordinateSystem__ystep
        sheet_x_size = unitary_distance_x * Nx
        sheet_y_size = unitary_distance_y * Ny

        # Sizes of the structure matrix
        nx = int(round(sheet_x_size * p.grid_density)) # Number of points in the x's
        ny = int(round(sheet_y_size * p.grid_density)) # Number of points in the y's
        assert ( nx > 0 ), 'Grid density or bound box in the x dimension too small'
        assert ( ny > 0 ), 'Grid density or bound box in the y dimension too small'

        # Pixels per grid cell.  BUG FIX: the original used
        # int(round(Nx / nx)), which under Python 2 floors the quotient
        # before rounding; use true division as DenseNoise._distrib does.
        ps_x = int(round(Nx * 1.0 / nx)) #Closest integer
        ps_y = int(round(Ny * 1.0 / ny))

        # This is the actual matrix of the pixels
        A = numpy.ones(shape) * 0.5

        if not p.grid: #The centers of the spots are randomly distributed in space
            x = p.random_generator.randint(0, Nx - ps_x + 1)
            y = p.random_generator.randint(0, Ny - ps_y + 1)
            z = p.random_generator.randint(0,2)
            # Noise matrix is mapped to the pixel matrix.
            # NOTE(review): the row offset x is drawn against the ps_x
            # extent but sliced with ps_y (and vice versa); this is only
            # obviously consistent for square patterns -- confirm for
            # Nx != Ny.
            A[x: (x + ps_y), y: (y + ps_x)] = z
            return A * p.scale + p.offset
        else: #In case you want the grid
            if ( Nx % nx == 0) and (Ny % ny == 0): #When the noise grid falls neatly into the the pixel grid
                x = p.random_generator.randint(0, nx)
                y = p.random_generator.randint(0, ny)
                z = p.random_generator.randint(0,2)
                # Noise matrix is mapped to the pixel matrix (faster method)
                A[x*ps_y: (x*ps_y + ps_y), y*ps_x: (y*ps_x + ps_x)] = z
                return A * p.scale + p.offset
            else: # If noise grid does not fit neatly in the pixel grid (slow method)
                x_points,y_points = SC.sheetcoordinates_of_matrixidx()
                # Obtain length of the side and length of the
                # division line between the grid
                size_of_block_x = Nx * 1.0 / nx
                size_of_block_y = Ny * 1.0 / ny
                # Construct the noise matrix: all 0.5 except one active cell
                Z = numpy.ones((nx,ny)) * 0.5
                x = p.random_generator.randint(0, nx)
                y = p.random_generator.randint(0, ny)
                z = p.random_generator.randint(0,2)
                Z[x,y] = z
                # Noise matrix is mapped to the pixel matrix: each pixel
                # takes the value of the grid cell containing its center.
                for i in range(Nx):
                    for j in range(Ny):
                        # Map along the x coordinates
                        x_entry = int( i / size_of_block_x)
                        y_entry = int( j / size_of_block_y)
                        A[j][i] = Z[x_entry][y_entry]
                return A * p.scale + p.offset
class DenseNoise(RandomGenerator):
    """
    2D Dense noise pattern generator, constrained to a grid.

    Similar to UniformRandom, but draws the noise pattern in a grid
    that can be smaller than the actual density of the
    PatternGenerator.

    By default, this produces a matrix with random values 0.0, 0.5, and 1.
    When a scale and an offset are provided the transformation maps them to:

        0    ->  offset
        0.5  ->  offset + 0.5 * scale
        1    ->  offset + scale

    --------
    Examples
    --------
    DenseNoise(grid_density=1, bounds=BoundingBox(radius=1),
    xdensity=4, ydensity=4) will produce something like this:

    [[ 1.  1.  1.  1.  0.  0.  0.  0. ]
     [ 1.  1.  1.  1.  0.  0.  0.  0. ]
     [ 1.  1.  1.  1.  0.  0.  0.  0. ]    Here the Sheet-coordinate size is 2.0x2.0,
     [ 1.  1.  1.  1.  0.  0.  0.  0. ]    so grid_density=1 yields a 2x2 grid
     [ 0.  0.  0.  0.  0.5 0.5 0.5 0.5]    sampled at 4 units per grid cell
     [ 0.  0.  0.  0.  0.5 0.5 0.5 0.5]
     [ 0.  0.  0.  0.  0.5 0.5 0.5 0.5]
     [ 0.  0.  0.  0.  0.5 0.5 0.5 0.5]])

    DenseNoise(grid_density=2, bounds=BoundingBox(radius=1),
    xdensity=4, ydensity=4) on the other hand will produce something like:

    [[ 1.  1.  0.  0.  0.  0.  0.5 0.5]
     [ 1.  1.  0.  0.  0.  0.  0.5 0.5]
     [ 1.  1.  1.  1.  0.  0.  0.  0. ]    Again the Sheet-coordinate size is 2.0x2.0,
     [ 1.  1.  1.  1.  0.  0.  0.  0. ]    but grid_density=2 yields a 4x4 grid
     [ 0.  0.  0.5 0.5 1.  1.  1.  1. ]    with 2 units per grid cell
     [ 0.  0.  0.5 0.5 1.  1.  1.  1. ]
     [ 1.  1.  0.  0.  1.  1.  1.  1. ]
     [ 1.  1.  0.  0.  1.  1.  1.  1. ]]

    -----
    Notes
    -----
    1. This method works much faster when the noise matrix falls neatly
       into the pixel matrix (~100 times faster).

    2. The value of each pixel in the generated pattern is determined
       by where the center of that pixel lies in the underlying grid,
       regardless of any overlap of that pixel with other grid
       squares.

    3. If a particular number of cells N is wanted, divide it by the
       length of the side of the bounding box to determine the
       grid_density. For example, if the user wants to have N=10 cells
       for a BoundingBox(radius=1) (which gives a bounding box size of
       2.0x2.0), the grid_density must be set to N/2 = 5 in order to
       have ten cells.

    4. The xdensity and ydensity must both be at least as large as the
       grid_density, e.g. 5 for the above example.
    """

    # Number of noise-grid cells per 1.0 unit of Sheet coordinates; the
    # effective value is capped at min(xdensity, ydensity) in _distrib().
    grid_density = param.Number(default=10, bounds=(1,None), softbounds=(1,50), doc="""
        Grid elements per 1.0 distance in Sheet coordinates.""")

    # Hide unused parameters inherited from the base class.
    x = param.Number(precedence=-1)
    y = param.Number(precedence=-1)
    size = param.Number(precedence=-1)

    def _distrib(self, shape, p):
        """
        Return a `shape`-sized array of grid-constrained three-level noise
        (0, 0.5 or 1 per cell), returned as ``A * p.scale + p.offset``.
        """
        # Cap grid_density at the coarser pixel density; warn only once.
        max_density = min(p.xdensity,p.ydensity)
        if (p.grid_density > max_density and not hasattr(self,"warned_about_density")):
            self.warning("Requested grid_density %s larger than xdensity %s or ydensity %s; capped at %s" %
                         (p.grid_density,p.xdensity,p.ydensity,max_density))
            p.grid_density = max_density
            self.warned_about_density=True

        Nx = shape[1]
        Ny = shape[0] # Size of the pixel matrix
        assert (Nx>0 and Ny>0), 'Pixel matrix cannot be zero'

        # Sheet-coordinate extent derived from the pixel step sizes.
        SC = SheetCoordinateSystem(p.bounds, p.xdensity, p.ydensity)
        unitary_distance_x = SC._SheetCoordinateSystem__xstep
        unitary_distance_y = SC._SheetCoordinateSystem__ystep
        sheet_x_size = unitary_distance_x * Nx
        sheet_y_size = unitary_distance_y * Ny

        # Sizes of the structure matrix
        nx = int(round(sheet_x_size * p.grid_density)) # Number of points in the x's
        ny = int(round(sheet_y_size * p.grid_density)) # Number of points in the y's
        assert ( nx > 0 ), 'Grid density or bound box in the x dimension too small'
        assert ( ny > 0 ), 'Grid density or bound box in the y dimension too small'

        # If the noise grid is proportional to the pixel grid and fits
        # neatly into it then this method is ~100 times faster
        if ( Nx % nx == 0) and (Ny % ny == 0):
            if (Nx == nx) and (Ny == ny): #This is faster to call the whole procedure
                # One value per pixel: randint(-1,2) yields -1/0/1 -> 0/0.5/1.
                result = 0.5 * (p.random_generator.randint(-1, 2, shape) + 1)
                return result * p.scale + p.offset
            else:
                # This is the actual matrix of the pixels
                A = numpy.zeros(shape)
                # Noise matrix that contains the structure of 0, 0.5, and 1's
                Z = 0.5 * (p.random_generator.randint(-1, 2, (nx, ny)) + 1 )
                ps_x = int(round(Nx * 1.0/ nx)) #Closest integer
                ps_y = int(round(Ny * 1.0/ ny))
                # Noise matrix is mapped to the pixel matrix.
                # NOTE(review): rows are sliced by i (the x index) with
                # extent ps_y; only obviously consistent when the pattern
                # is square -- confirm for Nx != Ny.
                for i in range(nx):
                    for j in range(ny):
                        A[i * ps_y: (i + 1) * ps_y, j * ps_x: (j + 1) * ps_x] = Z[i,j]
                return A * p.scale + p.offset
        # General method in case the noise grid does not
        # fall neatly in the pixel grid
        else:
            # Obtain length of the side and length of the
            # division line between the grid
            x_points,y_points = SC.sheetcoordinates_of_matrixidx()
            # This is the actual matrix of the pixels
            A = numpy.zeros(shape)
            # Noise matrix that contains the structure of 0, 0.5, and 1's
            Z = 0.5 * (p.random_generator.randint(-1, 2, (nx, ny)) + 1 )
            size_of_block_x = Nx * 1.0 / nx
            size_of_block_y = Ny * 1.0 / ny
            # Noise matrix is mapped to the pixel matrix: each pixel takes
            # the value of the grid cell containing its center.
            for i in range(Nx):
                for j in range(Ny):
                    # Map along the x coordinates
                    x_entry = int( i / size_of_block_x)
                    y_entry = int( j / size_of_block_y)
                    A[j][i] = Z[x_entry][y_entry]
            return A * p.scale + p.offset
| antolikjan/imagen | imagen/random.py | Python | bsd-3-clause | 28,370 | [
"Gaussian"
] | 7ded8db1afe99005846a16bd5119697577b6bfe6a90a8b4e1cafaa267edc2c06 |
#!/usr/bin/env python
"""Test functions in openmoltools.schrodinger."""
import unittest
from openmoltools.schrodinger import *
@unittest.skipIf(not is_schrodinger_suite_installed(), "This test requires Schrodinger's suite")
def test_structconvert():
    """Test run_structconvert() function."""
    benzene_path = utils.get_data_filename("chemicals/benzene/benzene.pdb")

    def collect_lines(pdb_path):
        """Collect all HETATM and CONNECT lines in the pdb."""
        def keep(line):
            # Discard atoms 4 and 9 which have a -0.000
            if line[10:11] in ('4', '9'):
                return False
            return line[:6] in ('HETATM', 'CONECT')

        with open(pdb_path, 'r') as pdb_file:
            return [line.strip() for line in pdb_file if keep(line)]

    with mdtraj.utils.enter_temp_directory():
        # Round-trip: convert from pdb to mol2 and back.
        run_structconvert(benzene_path, 'benzene.mol2')
        run_structconvert('benzene.mol2', 'benzene.pdb')
        roundtrip_lines = collect_lines('benzene.pdb')

        # The new pdb should be equal to the old one.
        assert collect_lines(benzene_path) == roundtrip_lines
@unittest.skipIf(not is_schrodinger_suite_installed(), "This test requires Schrodinger's suite")
def test_proplister():
    """Test run_proplister() function."""
    benzene_path = utils.get_data_filename("chemicals/benzene/benzene.sdf")
    props = run_proplister(benzene_path)

    # A single molecule with 23 properties is expected.
    assert len(props) == 1
    assert len(props[0]) == 23

    # Check a known subset of the properties.
    expected_subset = {
        'i_sd_PUBCHEM_COMPOUND_CID': '241',
        'r_sd_PUBCHEM_CONFORMER_RMSD': '0.4',
        'i_sd_PUBCHEM_CONFORMER_DIVERSEORDER': '1',
        's_sd_PUBCHEM_MMFF94_PARTIAL_CHARGES': ('12\n1 -0.15\n10 0.15\n11 0.15\n'
                                                '12 0.15\n2 -0.15\n3 -0.15\n4 -0.15\n'
                                                '5 -0.15\n6 -0.15\n7 0.15\n8 0.15\n'
                                                '9 0.15'),
    }
    assert set(expected_subset.items()) < set(props[0].items())
@unittest.skipIf(not is_schrodinger_suite_installed(), "This test requires Schrodinger's suite")
def test_epik_maesubset_autoconvert():
    """Test run_epik and run_maesubset functions and autoconvert_maestro decorator."""
    imatinib_path = utils.get_data_filename("chemicals/imatinib/imatinib.sdf")
    with mdtraj.utils.enter_temp_directory():
        # Manual pipeline: sdf -> mae, protonation-state enumeration with
        # epik, extract the first two states, then mae -> sdf.
        run_structconvert(imatinib_path, 'imatinib.mae')
        run_epik('imatinib.mae', 'imatinib-epik.mae', ph=7.0)
        run_maesubset('imatinib-epik.mae', 'imatinib02.mae', range=[0, 2])
        run_structconvert('imatinib02.mae', 'imatinib02.sdf')

        # The 4 lines above should be equivalent to
        # NOTE(review): this call passes tautomerize=True while the manual
        # path above does not -- presumably run_epik defaults to
        # tautomerize=True; confirm against openmoltools.schrodinger.
        run_epik(imatinib_path, 'imatinib-auto02.sdf', ph=7.0, tautomerize=True,
                 extract_range=[0, 2])

        # Check that results contain indeed 2 molecules
        assert len(run_proplister('imatinib02.sdf')) == 2  # 2 molecules
        assert len(run_proplister('imatinib-auto02.sdf')) == 2

        # Check that results are identical
        with open('imatinib02.sdf', 'r') as f:
            lines = f.readlines()
        with open('imatinib-auto02.sdf', 'r') as f:
            assert f.readlines() == lines
| choderalab/openmoltools | openmoltools/tests/test_schrodinger.py | Python | mit | 3,453 | [
"MDTraj"
] | aca8db1d077993d7efcaeec8667a0cd222b8c198e885f626efdd601854476197 |
""" TaskQueueDB class is a front-end to the task queues db
"""
__RCSID__ = "$Id"
import random
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.WorkloadManagementSystem.private.SharesCorrector import SharesCorrector
from DIRAC.WorkloadManagementSystem.private.Queues import maxCPUSegments
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities import List
from DIRAC.Core.Utilities.PrettyPrint import printDict
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Security import Properties, CS
# Share given to a group when no share is configured for it.
DEFAULT_GROUP_SHARE = 1000
# Smallest share a task queue may end up with after corrections.
TQ_MIN_SHARE = 0.001

# Fields that take exactly one value in a task queue definition.
singleValueDefFields = ( 'OwnerDN', 'OwnerGroup', 'Setup', 'CPUTime' )
# Fields that take a list of values; each gets its own tq_TQTo<field> table.
multiValueDefFields = ( 'Sites', 'GridCEs', 'GridMiddlewares', 'BannedSites',
                        'Platforms', 'PilotTypes', 'SubmitPools', 'JobTypes', 'Tags' )
# Singular counterparts of the above used in match requests.
multiValueMatchFields = ( 'GridCE', 'Site', 'GridMiddleware', 'Platform',
                          'PilotType', 'SubmitPool', 'JobType', 'Tag' )
tagMatchFields = ( 'Tag', )
# Match fields that also support a Banned<field> variant.
bannedJobMatchFields = ( 'Site', )
# Fields that must match strictly when required by the pilot.
strictRequireMatchFields = ( 'SubmitPool', 'Platform', 'PilotType', 'Tag' )
# Fields that every match request must provide.
mandatoryMatchFields = ( 'Setup', 'CPUTime' )
# Fields ignored when computing priorities.
priorityIgnoredFields = ( 'Sites', 'BannedSites' )
class TaskQueueDB( DB ):
  def __init__( self ):
    """ Connect to the TaskQueueDB and make sure its tables exist.

        :raises Exception: if the tables cannot be created
    """
    random.seed()
    DB.__init__( self, 'TaskQueueDB', 'WorkloadManagement/TaskQueueDB' )
    # Hard limit of jobs per task queue before a new one is created.
    self.__maxJobsInTQ = 5000
    self.__defaultCPUSegments = maxCPUSegments
    self.__maxMatchRetry = 3
    # (min, max) allowed job priorities, used by __hackJobPriority.
    self.__jobPriorityBoundaries = ( 0.001, 10 )
    self.__groupShares = {}
    # Cache that deletes empty TQs with a delay via __deleteTQIfEmpty.
    self.__deleteTQWithDelay = DictCache( self.__deleteTQIfEmpty )
    self.__opsHelper = Operations()
    self.__ensureInsertionIsSingle = False
    self.__sharesCorrector = SharesCorrector( self.__opsHelper )
    result = self.__initializeDB()
    if not result[ 'OK' ]:
      raise Exception( "Can't create tables: %s" % result[ 'Message' ] )
  def enableAllTaskQueues( self ):
    """ Enable all Task queues

        Sets Enabled to 1 for every row of tq_TaskQueues (the column is
        used as a counter elsewhere in this class; see __setTaskQueueEnabled).
    """
    return self.updateFields( "tq_TaskQueues", updateDict = { "Enabled" :"1" } )
def findOrphanJobs( self ):
""" Find jobs that are not in any task queue
"""
result = self._query( "select JobID from tq_Jobs WHERE TQId not in (SELECT TQId from tq_TaskQueues)" )
if not result[ 'OK' ]:
return result
return S_OK( [ row[0] for row in result[ 'Value' ] ] )
  def isSharesCorrectionEnabled( self ):
    """ Whether share correction is enabled in the CS (JobScheduling section). """
    return self.__getCSOption( "EnableSharesCorrection", False )
  def __getCSOption( self, optionName, defValue ):
    """ Read an option from the JobScheduling section of the Operations CS. """
    return self.__opsHelper.getValue( "JobScheduling/%s" % optionName, defValue )
  def getValidPilotTypes( self ):
    """ Return the configured list of valid pilot types (default: ['private']). """
    return self.__getCSOption( "AllPilotTypes", [ 'private' ] )
  def __initializeDB( self ):
    """
    Create the tables

    Creates tq_TaskQueues, tq_Jobs and one tq_TQTo<field> table per
    multi-valued definition field, skipping tables that already exist.
    """
    result = self._query( "show tables" )
    if not result[ 'OK' ]:
      return result

    tablesInDB = [ t[0] for t in result[ 'Value' ] ]
    tablesToCreate = {}
    self.__tablesDesc = {}

    # Main table: one row per task queue, keyed by the single-valued fields.
    self.__tablesDesc[ 'tq_TaskQueues' ] = { 'Fields' : { 'TQId' : 'INTEGER(10) UNSIGNED AUTO_INCREMENT NOT NULL',
                                                          'OwnerDN' : 'VARCHAR(255) NOT NULL',
                                                          'OwnerGroup' : 'VARCHAR(32) NOT NULL',
                                                          'Setup' : 'VARCHAR(32) NOT NULL',
                                                          'CPUTime' : 'BIGINT(20) UNSIGNED NOT NULL',
                                                          'Priority' : 'FLOAT NOT NULL',
                                                          'Enabled' : 'TINYINT(1) NOT NULL DEFAULT 0'
                                                        },
                                             'PrimaryKey' : 'TQId',
                                             'Indexes': { 'TQOwner': [ 'OwnerDN', 'OwnerGroup',
                                                                       'Setup', 'CPUTime' ]
                                                        }
                                           }

    # One row per job, pointing at its task queue.
    self.__tablesDesc[ 'tq_Jobs' ] = { 'Fields' : { 'TQId' : 'INTEGER(10) UNSIGNED NOT NULL',
                                                    'JobId' : 'INTEGER(11) UNSIGNED NOT NULL',
                                                    'Priority' : 'INTEGER UNSIGNED NOT NULL',
                                                    'RealPriority' : 'FLOAT NOT NULL'
                                                  },
                                       'PrimaryKey' : 'JobId',
                                       'Indexes': { 'TaskIndex': [ 'TQId' ] },
                                     }

    # One (TQId, Value) table per multi-valued field (Sites, GridCEs, ...).
    for multiField in multiValueDefFields:
      tableName = 'tq_TQTo%s' % multiField
      self.__tablesDesc[ tableName ] = { 'Fields' : { 'TQId' : 'INTEGER UNSIGNED NOT NULL',
                                                      'Value' : 'VARCHAR(64) NOT NULL'
                                                    },
                                         'Indexes': { 'TaskIndex': [ 'TQId' ], '%sIndex' % multiField: [ 'Value' ] },
                                       }

    for tableName in self.__tablesDesc:
      if not tableName in tablesInDB:
        tablesToCreate[ tableName ] = self.__tablesDesc[ tableName ]

    return self._createTables( tablesToCreate )
def getGroupsInTQs( self ):
cmdSQL = "SELECT DISTINCT( OwnerGroup ) FROM `tq_TaskQueues`"
result = self._query( cmdSQL )
if not result[ 'OK' ]:
return result
return S_OK( [ row[0] for row in result[ 'Value' ] ] )
  def forceRecreationOfTables( self ):
    """ Drop all task-queue tables and recreate them from scratch.

        WARNING: destroys all task queue and job-to-queue data.
    """
    dropSQL = "DROP TABLE IF EXISTS %s" % ", ".join( self.__tablesDesc )
    result = self._update( dropSQL )
    if not result[ 'OK' ]:
      return result
    return self._createTables( self.__tablesDesc )
def fitCPUTimeToSegments( self, cpuTime ):
"""
Fit the CPU time to the valid segments
"""
maxCPUSegments = self.__getCSOption( "taskQueueCPUTimeIntervals", self.__defaultCPUSegments )
try:
maxCPUSegments = [ int( seg ) for seg in maxCPUSegments ]
#Check segments in the CS
last = 0
for cpuS in maxCPUSegments:
if cpuS <= last:
maxCPUSegments = self.__defaultCPUSegments
break
last = cpuS
except:
maxCPUSegments = self.__defaultCPUSegments
#Map to a segment
for iP in range( len( maxCPUSegments ) ):
cpuSegment = maxCPUSegments[ iP ]
if cpuTime <= cpuSegment:
return cpuSegment
return maxCPUSegments[-1]
  def _checkTaskQueueDefinition( self, tqDefDict ):
    """
    Check a task queue definition dict is valid

    Validates types of the single- and multi-valued fields, SQL-escapes
    all string values in place, and returns S_OK( tqDefDict ) or S_ERROR.
    Note: uses Python 2 `long`/`basestring`.
    """
    # Confine the SystemConfigs legacy option here, use Platform everywhere else
    # until the SystemConfigs is no more used in the TaskQueueDB
    if 'SystemConfigs' in tqDefDict and not "Platforms" in tqDefDict:
      tqDefDict['Platforms'] = tqDefDict['SystemConfigs']

    # Single-valued fields are mandatory; CPUTime must be an integer,
    # everything else a string (escaped before use in SQL).
    for field in singleValueDefFields:
      if field not in tqDefDict:
        return S_ERROR( "Missing mandatory field '%s' in task queue definition" % field )
      if field in [ "CPUTime" ]:
        if not isinstance( tqDefDict[field], ( int, long ) ):
          return S_ERROR( "Mandatory field %s value type is not valid: %s" % ( field, type( tqDefDict[field] ) ) )
      else:
        if not isinstance( tqDefDict[field], basestring ):
          return S_ERROR( "Mandatory field %s value type is not valid: %s" % ( field, type( tqDefDict[field] ) ) )
        result = self._escapeString( tqDefDict[ field ] )
        if not result[ 'OK' ]:
          return result
        tqDefDict[ field ] = result[ 'Value' ]

    # Multi-valued fields are optional lists/tuples of strings.
    for field in multiValueDefFields:
      if field not in tqDefDict:
        continue
      if not isinstance( tqDefDict[field], ( list, tuple ) ):
        return S_ERROR( "Multi value field %s value type is not valid: %s" % ( field, type( tqDefDict[field] ) ) )
      result = self._escapeValues( tqDefDict[ field ] )
      if not result[ 'OK' ]:
        return result
      tqDefDict[ field ] = result[ 'Value' ]

    return S_OK( tqDefDict )
  def _checkMatchDefinition( self, tqMatchDict ):
    """
    Check a task queue match dict is valid

    Validates and SQL-escapes the fields of a match request in place.
    Returns S_OK( tqMatchDict ) or S_ERROR.
    """
    def travelAndCheckType( value, validTypes, escapeValues = True ):
      # Accept either a single value or a list/tuple of values of
      # `validTypes`; escape them for SQL unless escapeValues is False.
      if isinstance( value, ( list, tuple ) ):
        for subValue in value:
          if not isinstance( subValue, validTypes ):
            return S_ERROR( "List contained type %s is not valid -> %s" % ( type( subValue ), validTypes ) )
        if escapeValues:
          return self._escapeValues( value )
        return S_OK( value )
      else:
        if not isinstance( value, validTypes ):
          return S_ERROR( "Type %s is not valid -> %s" % ( type( value ), validTypes ) )
        if escapeValues:
          return self._escapeString( value )
        return S_OK( value )

    # Confine the SystemConfig legacy option here, use Platform everywhere else
    # until the SystemConfig is no more used in the TaskQueueDB
    if 'SystemConfig' in tqMatchDict and not "Platform" in tqMatchDict:
      tqMatchDict['Platform'] = tqMatchDict['SystemConfig']

    # Single-valued fields: only Setup and CPUTime are mandatory
    # (mandatoryMatchFields); CPUTime stays numeric and unescaped.
    for field in singleValueDefFields:
      if field not in tqMatchDict:
        if field in mandatoryMatchFields:
          return S_ERROR( "Missing mandatory field '%s' in match request definition" % field )
        continue
      fieldValue = tqMatchDict[ field ]
      if field in [ "CPUTime" ]:
        result = travelAndCheckType( fieldValue, ( int, long ), escapeValues = False )
      else:
        result = travelAndCheckType( fieldValue, basestring )
      if not result[ 'OK' ]:
        return S_ERROR( "Match definition field %s failed : %s" % ( field, result[ 'Message' ] ) )
      tqMatchDict[ field ] = result[ 'Value' ]

    #Check multivalue fields plus their Banned/Required variants
    for multiField in multiValueMatchFields:
      for field in ( multiField, "Banned%s" % multiField, "Required%s" % multiField ):
        if field in tqMatchDict:
          fieldValue = tqMatchDict[ field ]
          result = travelAndCheckType( fieldValue, basestring )
          if not result[ 'OK' ]:
            return S_ERROR( "Match definition field %s failed : %s" % ( field, result[ 'Message' ] ) )
          tqMatchDict[ field ] = result[ 'Value' ]

    return S_OK( tqMatchDict )
  def __createTaskQueue( self, tqDefDict, priority = 1, connObj = False ):
    """
    Create a task queue
      Returns S_OK( tqId ) / S_ERROR

    The TQ is inserted with Enabled = 0 and must be enabled by the caller
    once its jobs are in place (see insertJob / __setTaskQueueEnabled).
    tqDefDict is expected to be already checked/escaped by
    _checkTaskQueueDefinition.
    """
    if not connObj:
      result = self._getConnection()
      if not result[ 'OK' ]:
        return S_ERROR( "Can't create task queue: %s" % result[ 'Message' ] )
      connObj = result[ 'Value' ]
    tqDefDict[ 'CPUTime' ] = self.fitCPUTimeToSegments( tqDefDict[ 'CPUTime' ] )
    sqlSingleFields = [ 'TQId', 'Priority' ]
    sqlValues = [ "0", str( priority ) ]
    for field in singleValueDefFields:
      sqlSingleFields.append( field )
      sqlValues.append( tqDefDict[ field ] )
    #Insert the TQ Disabled
    sqlSingleFields.append( "Enabled" )
    sqlValues.append( "0" )
    cmd = "INSERT INTO tq_TaskQueues ( %s ) VALUES ( %s )" % ( ", ".join( sqlSingleFields ), ", ".join( [ str( v ) for v in sqlValues ] ) )
    result = self._update( cmd, conn = connObj )
    if not result[ 'OK' ]:
      self.log.error( "Can't insert TQ in DB", result[ 'Value' ] )
      return result
    # Recover the auto-increment id of the new row.
    if 'lastRowId' in result:
      tqId = result['lastRowId']
    else:
      result = self._query( "SELECT LAST_INSERT_ID()", conn = connObj )
      if not result[ 'OK' ]:
        self.cleanOrphanedTaskQueues( connObj = connObj )
        return S_ERROR( "Can't determine task queue id after insertion" )
      tqId = result[ 'Value' ][0][0]
    # Insert the multi-valued conditions, one tq_TQTo<field> table each;
    # on any failure clean up so no half-defined TQ is left behind.
    for field in multiValueDefFields:
      if field not in tqDefDict:
        continue
      values = List.uniqueElements( [ value for value in tqDefDict[ field ] if value.strip() ] )
      if not values:
        continue
      cmd = "INSERT INTO `tq_TQTo%s` ( TQId, Value ) VALUES " % field
      cmd += ", ".join( [ "( %s, %s )" % ( tqId, str( value ) ) for value in values ] )
      result = self._update( cmd, conn = connObj )
      if not result[ 'OK' ]:
        self.log.error( "Failed to insert %s condition" % field, result[ 'Message' ] )
        self.cleanOrphanedTaskQueues( connObj = connObj )
        return S_ERROR( "Can't insert values %s for field %s: %s" % ( str( values ), field, result[ 'Message' ] ) )
    self.log.info( "Created TQ %s" % tqId )
    return S_OK( tqId )
  def cleanOrphanedTaskQueues( self, connObj = False ):
    """
    Delete all empty task queues

    Removes task queues that have no jobs, then purges rows in the
    tq_TQTo<field> tables that no longer point to an existing TQ.
    NOTE(review): only TQs with Enabled >= 1 are deleted -- presumably
    disabled (in-construction) TQs are deliberately spared; confirm.
    """
    self.log.info( "Cleaning orphaned TQs" )
    result = self._update( "DELETE FROM `tq_TaskQueues` WHERE Enabled >= 1 AND TQId not in ( SELECT DISTINCT TQId from `tq_Jobs` )", conn = connObj )
    if not result[ 'OK' ]:
      return result
    for mvField in multiValueDefFields:
      result = self._update( "DELETE FROM `tq_TQTo%s` WHERE TQId not in ( SELECT DISTINCT TQId from `tq_TaskQueues` )" % mvField,
                             conn = connObj )
      if not result[ 'OK' ]:
        return result
    return S_OK()
  def __setTaskQueueEnabled( self, tqId, enabled = True, connObj = False ):
    """ Increment or decrement the Enabled counter of a task queue.

        Enabled acts as a counter, not a boolean: True adds 1, False
        subtracts 1. Returns S_OK( rowsUpdated > 0 ) or S_ERROR.
        Note: `enabled` is rebound to the SQL fragment ("+ 1"/"- 1"), so
        the log messages below print that fragment rather than a boolean.
    """
    if enabled:
      enabled = "+ 1"
    else:
      enabled = "- 1"
    upSQL = "UPDATE `tq_TaskQueues` SET Enabled = Enabled %s WHERE TQId=%d" % ( enabled, tqId )
    result = self._update( upSQL, conn = connObj )
    if not result[ 'OK' ]:
      self.log.error( "Error setting TQ state", "TQ %s State %s: %s" % ( tqId, enabled, result[ 'Message' ] ) )
      return result
    updated = result['Value'] > 0
    if updated:
      self.log.info( "Set enabled = %s for TQ %s" % ( enabled, tqId ) )
    return S_OK( updated )
def __hackJobPriority( self, jobPriority ):
jobPriority = min( max( int( jobPriority ), self.__jobPriorityBoundaries[0] ), self.__jobPriorityBoundaries[1] )
if jobPriority == self.__jobPriorityBoundaries[0]:
return 10 ** ( -5 )
if jobPriority == self.__jobPriorityBoundaries[1]:
return 10 ** 6
return jobPriority
def insertJob( self, jobId, tqDefDict, jobPriority, skipTQDefCheck = False ):
"""
Insert a job in a task queue
Returns S_OK( tqId ) / S_ERROR
"""
try:
long( jobId )
except ValueError:
return S_ERROR( "JobId is not a number!" )
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return S_ERROR( "Can't insert job: %s" % retVal[ 'Message' ] )
connObj = retVal[ 'Value' ]
if not skipTQDefCheck:
tqDefDict = dict( tqDefDict )
retVal = self._checkTaskQueueDefinition( tqDefDict )
if not retVal[ 'OK' ]:
self.log.error( "TQ definition check failed", retVal[ 'Message' ] )
return retVal
tqDefDict = retVal[ 'Value' ]
tqDefDict[ 'CPUTime' ] = self.fitCPUTimeToSegments( tqDefDict[ 'CPUTime' ] )
self.log.info( "Inserting job %s with requirements: %s" % ( jobId, printDict( tqDefDict ) ) )
retVal = self.__findAndDisableTaskQueue( tqDefDict, skipDefinitionCheck = True, connObj = connObj )
if not retVal[ 'OK' ]:
return retVal
tqInfo = retVal[ 'Value' ]
newTQ = False
if not tqInfo[ 'found' ]:
self.log.info( "Creating a TQ for job %s" % jobId )
retVal = self.__createTaskQueue( tqDefDict, 1, connObj = connObj )
if not retVal[ 'OK' ]:
return retVal
tqId = retVal[ 'Value' ]
newTQ = True
else:
tqId = tqInfo[ 'tqId' ]
self.log.info( "Found TQ %s for job %s requirements" % ( tqId, jobId ) )
try:
result = self.__insertJobInTaskQueue( jobId, tqId, int( jobPriority ), checkTQExists = False, connObj = connObj )
if not result[ 'OK' ]:
self.log.error( "Error inserting job in TQ", "Job %s TQ %s: %s" % ( jobId, tqId, result[ 'Message' ] ) )
return result
if newTQ:
self.recalculateTQSharesForEntity( tqDefDict[ 'OwnerDN' ], tqDefDict[ 'OwnerGroup' ], connObj = connObj )
finally:
self.__setTaskQueueEnabled( tqId, True )
return S_OK()
def __insertJobInTaskQueue( self, jobId, tqId, jobPriority, checkTQExists = True, connObj = False ):
"""
Insert a job in a given task queue
"""
self.log.info( "Inserting job %s in TQ %s with priority %s" % ( jobId, tqId, jobPriority ) )
if not connObj:
result = self._getConnection()
if not result[ 'OK' ]:
return S_ERROR( "Can't insert job: %s" % result[ 'Message' ] )
connObj = result[ 'Value' ]
if checkTQExists:
result = self._query( "SELECT tqId FROM `tq_TaskQueues` WHERE TQId = %s" % tqId, conn = connObj )
if not result[ 'OK' ] or len ( result[ 'Value' ] ) == 0:
return S_OK( "Can't find task queue with id %s: %s" % ( tqId, result[ 'Message' ] ) )
hackedPriority = self.__hackJobPriority( jobPriority )
result = self._update( "INSERT INTO tq_Jobs ( TQId, JobId, Priority, RealPriority ) \
VALUES ( %s, %s, %s, %f ) ON DUPLICATE KEY UPDATE TQId = %s, \
Priority = %s, RealPriority = %f" % ( tqId, jobId, jobPriority, hackedPriority,
tqId, jobPriority, hackedPriority ),
conn = connObj )
if not result[ 'OK' ]:
return result
return S_OK()
  def __generateTQFindSQL( self, tqDefDict, skipDefinitionCheck = False ):
    """
    Find a task queue that has exactly the same requirements

    Builds and returns (as S_OK(str)) the SQL WHERE condition selecting a
    TQ whose single-valued fields match exactly and whose multi-valued
    tables contain exactly the requested value sets.
    """
    if not skipDefinitionCheck:
      tqDefDict = dict( tqDefDict )
      result = self._checkTaskQueueDefinition( tqDefDict )
      if not result[ 'OK' ]:
        return result
      tqDefDict = result[ 'Value' ]

    sqlCondList = []
    for field in singleValueDefFields:
      sqlCondList.append( "`tq_TaskQueues`.%s = %s" % ( field, tqDefDict[ field ] ) )

    #MAGIC SUBQUERIES TO ENSURE STRICT MATCH
    # For each multi-valued field require that (a) the TQ has exactly as
    # many values as requested and (b) exactly that many of its values are
    # in the requested set -- together this enforces set equality.
    for field in multiValueDefFields:
      tableName = '`tq_TQTo%s`' % field
      if field in tqDefDict and tqDefDict[ field ]:
        firstQuery = "SELECT COUNT(%s.Value) \
FROM %s \
WHERE %s.TQId = `tq_TaskQueues`.TQId" % ( tableName, tableName, tableName )
        grouping = "GROUP BY %s.TQId" % tableName
        valuesList = List.uniqueElements( [ value.strip() for value in tqDefDict[ field ] if value.strip() ] )
        numValues = len( valuesList )
        secondQuery = "%s AND %s.Value in (%s)" % ( firstQuery, tableName,
                                                    ",".join( [ "%s" % str( value ) for value in valuesList ] ) )
        sqlCondList.append( "%s = (%s %s)" % ( numValues, firstQuery, grouping ) )
        sqlCondList.append( "%s = (%s %s)" % ( numValues, secondQuery, grouping ) )
      else:
        # Field not requested: the TQ must have no values for it either.
        sqlCondList.append( "`tq_TaskQueues`.TQId not in ( SELECT DISTINCT %s.TQId from %s )" % ( tableName, tableName ) )
    #END MAGIC: That was easy ;)
    return S_OK( " AND ".join( sqlCondList ) )
def __findAndDisableTaskQueue( self, tqDefDict, skipDefinitionCheck = False, retries = 10, connObj = False ):
  """ Disable and find TQ

  Find the smallest task queue exactly matching tqDefDict and disable it,
  retrying up to ``retries`` times (to cope with concurrent matchers racing
  on the same TQ).

  :return: S_OK( { 'found', 'tqId', 'enabled', 'jobs' } ) / S_ERROR
  """
  for _ in range( retries ):
    result = self.__findSmallestTaskQueue( tqDefDict, skipDefinitionCheck = skipDefinitionCheck, connObj = connObj )
    if not result[ 'OK' ]:
      return result
    data = result[ 'Value' ]
    if not data[ 'found' ]:
      # No matching TQ: propagate the 'found': False payload
      return result
    if data[ 'enabled' ] < 1:
      gLogger.notice( "TaskQueue {tqId} seems to be already disabled ({enabled})".format( **data ) )
    result = self.__setTaskQueueEnabled( data[ 'tqId' ], False )
    if result[ 'OK' ]:
      return S_OK( data )
  return S_ERROR( "Could not disable TQ" )
def __findSmallestTaskQueue( self, tqDefDict, skipDefinitionCheck = False, connObj = False ):
  """
  Find the task queue that exactly matches tqDefDict and currently holds the
  fewest jobs.

  :return: S_OK( { 'found' : bool [, 'tqId', 'enabled', 'jobs' ] } ) / S_ERROR
  """
  result = self.__generateTQFindSQL( tqDefDict, skipDefinitionCheck = skipDefinitionCheck)
  if not result[ 'OK' ]:
    return result
  sqlCmd = "SELECT COUNT( `tq_Jobs`.JobID ), `tq_TaskQueues`.TQId, `tq_TaskQueues`.Enabled FROM `tq_TaskQueues`, `tq_Jobs`"
  # Order by job count ascending so row 0 is the smallest matching TQ
  sqlCmd = "%s WHERE `tq_TaskQueues`.TQId = `tq_Jobs`.TQId AND %s GROUP BY `tq_Jobs`.TQId ORDER BY COUNT( `tq_Jobs`.JobID ) ASC" % ( sqlCmd, result[ 'Value' ] )
  result = self._query( sqlCmd, conn = connObj )
  if not result[ 'OK' ]:
    return S_ERROR( "Can't find task queue: %s" % result[ 'Message' ] )
  data = result[ 'Value' ]
  # No match, or even the smallest matching TQ is already full
  if len( data ) == 0 or data[0][0] >= self.__maxJobsInTQ:
    return S_OK( { 'found' : False } )
  return S_OK( { 'found' : True, 'tqId' : data[0][1], 'enabled' : data[0][2], 'jobs' : data[0][0] } )
def matchAndGetJob( self, tqMatchDict, numJobsPerTry = 50, numQueuesPerTry = 10, negativeCond = {} ):
  """
  Match a job for the resource described by tqMatchDict.

  :param tqMatchDict: resource description used for matching
  :param numJobsPerTry: max candidate jobs fetched from a TQ per attempt
  :param numQueuesPerTry: max task queues considered per attempt
  :param negativeCond: conditions the matched TQ must NOT fulfill
  :return: S_OK( { 'matchFound' : bool [, 'jobId', 'taskQueueId' ], 'tqMatch' } ) / S_ERROR
  """
  #Make a copy to avoid modification of original if escaping needs to be done
  tqMatchDict = dict( tqMatchDict )
  retVal = self._checkMatchDefinition( tqMatchDict )
  if not retVal[ 'OK' ]:
    self.log.error( "TQ match request check failed", retVal[ 'Message' ] )
    return retVal
  retVal = self._getConnection()
  if not retVal[ 'OK' ]:
    return S_ERROR( "Can't connect to DB: %s" % retVal[ 'Message' ] )
  connObj = retVal[ 'Value' ]
  preJobSQL = "SELECT `tq_Jobs`.JobId, `tq_Jobs`.TQId FROM `tq_Jobs` WHERE `tq_Jobs`.TQId = %s AND `tq_Jobs`.Priority = %s"
  # Pick a "winning" per-job priority with a randomized, priority-weighted order
  prioSQL = "SELECT `tq_Jobs`.Priority FROM `tq_Jobs` WHERE `tq_Jobs`.TQId = %s ORDER BY RAND() / `tq_Jobs`.RealPriority ASC LIMIT 1"
  postJobSQL = " ORDER BY `tq_Jobs`.JobId ASC LIMIT %s" % numJobsPerTry
  for _ in range( self.__maxMatchRetry ):
    noJobsFound = False
    if 'JobID' in tqMatchDict:
      # A certain JobID is required by the resource, so all TQ are to be considered
      retVal = self.matchAndGetTaskQueue( tqMatchDict,
                                          numQueuesToGet = 0,
                                          skipMatchDictDef = True,
                                          connObj = connObj )
      preJobSQL = "%s AND `tq_Jobs`.JobId = %s " % ( preJobSQL, tqMatchDict['JobID'] )
    else:
      retVal = self.matchAndGetTaskQueue( tqMatchDict,
                                          numQueuesToGet = numQueuesPerTry,
                                          skipMatchDictDef = True,
                                          negativeCond = negativeCond,
                                          connObj = connObj )
    if not retVal[ 'OK' ]:
      return retVal
    tqList = retVal[ 'Value' ]
    if len( tqList ) == 0:
      self.log.info( "No TQ matches requirements" )
      return S_OK( { 'matchFound' : False, 'tqMatch' : tqMatchDict } )
    for tqId, tqOwnerDN, tqOwnerGroup in tqList:
      self.log.info( "Trying to extract jobs from TQ %s" % tqId )
      retVal = self._query( prioSQL % tqId, conn = connObj )
      if not retVal[ 'OK' ]:
        return S_ERROR( "Can't retrieve winning priority for matching job: %s" % retVal[ 'Message' ] )
      if len( retVal[ 'Value' ] ) == 0:
        # TQ has no jobs any more; try the next candidate TQ
        noJobsFound = True
        continue
      prio = retVal[ 'Value' ][0][0]
      retVal = self._query( "%s %s" % ( preJobSQL % ( tqId, prio ), postJobSQL ), conn = connObj )
      if not retVal[ 'OK' ]:
        return S_ERROR( "Can't begin transaction for matching job: %s" % retVal[ 'Message' ] )
      jobTQList = [ ( row[0], row[1] ) for row in retVal[ 'Value' ] ]
      if len( jobTQList ) == 0:
        gLogger.info( "Task queue %s seems to be empty, triggering a cleaning" % tqId )
        self.__deleteTQWithDelay.add( tqId, 300, ( tqId, tqOwnerDN, tqOwnerGroup ) )
      while len( jobTQList ) > 0:
        # Pick a random candidate to reduce contention between matchers
        jobId, tqId = jobTQList.pop( random.randint( 0, len( jobTQList ) - 1 ) )
        self.log.info( "Trying to extract job %s from TQ %s" % ( jobId, tqId ) )
        retVal = self.deleteJob( jobId, connObj = connObj )
        if not retVal[ 'OK' ]:
          msgFix = "Could not take job"
          msgVar = " %s out from the TQ %s: %s" % ( jobId, tqId, retVal[ 'Message' ] )
          self.log.error( msgFix, msgVar )
          return S_ERROR( msgFix + msgVar )
        if retVal['Value']:
          # deleteJob returned True: we won the race for this job
          self.log.info( "Extracted job %s with prio %s from TQ %s" % ( jobId, prio, tqId ) )
          return S_OK( { 'matchFound' : True, 'jobId' : jobId, 'taskQueueId' : tqId, 'tqMatch' : tqMatchDict } )
      self.log.info( "No jobs could be extracted from TQ %s" % tqId )
    if noJobsFound:
      return S_OK( { 'matchFound' : False, 'tqMatch' : tqMatchDict } )
  else:
    # for-else: reached only when all retries are exhausted (there is no break)
    self.log.info( "Could not find a match after %s match retries" % self.__maxMatchRetry )
  return S_ERROR( "Could not find a match after %s match retries" % self.__maxMatchRetry )
def matchAndGetTaskQueue( self, tqMatchDict, numQueuesToGet = 1, skipMatchDictDef = False,
                          negativeCond = {}, connObj = False ):
  """ Get a queue that matches the requirements

  :param tqMatchDict: resource description used for matching
  :param numQueuesToGet: LIMIT of the query; 0 means no limit
  :param skipMatchDictDef: if True, tqMatchDict is assumed already validated
  :param negativeCond: conditions the TQ must NOT fulfill
  :param connObj: optional DB connection to reuse
  :return: S_OK( [ ( tqId, ownerDN, ownerGroup ), ... ] ) / S_ERROR
  """
  #Make a copy to avoid modification of original if escaping needs to be done
  tqMatchDict = dict( tqMatchDict )
  if not skipMatchDictDef:
    retVal = self._checkMatchDefinition( tqMatchDict )
    if not retVal[ 'OK' ]:
      return retVal
  retVal = self.__generateTQMatchSQL( tqMatchDict, numQueuesToGet = numQueuesToGet, negativeCond = negativeCond )
  if not retVal[ 'OK' ]:
    return retVal
  matchSQL = retVal[ 'Value' ]
  retVal = self._query( matchSQL, conn = connObj )
  if not retVal[ 'OK' ]:
    return retVal
  return S_OK( [ ( row[0], row[1], row[2] ) for row in retVal[ 'Value' ] ] )
def __generateSQLSubCond( self, sqlString, value, boolOp = 'OR' ):
  """
  Interpolate value(s) into the %s placeholder of sqlString.

  A scalar produces a single clause; a list/tuple produces a parenthesized
  group of clauses joined by boolOp ('OR' by default).
  """
  if isinstance( value, ( list, tuple ) ):
    clauses = [ sqlString % str( item ).strip() for item in value ]
    separator = " %s " % boolOp
    return "( %s )" % separator.join( clauses )
  return sqlString % str( value ).strip()
def __generateNotSQL( self, tableDict, negativeCond ):
  """ Generate negative conditions

  negativeCond can be a dict or a list/tuple of dicts:
    - a dict is a normal conditional dict ( key1 in ( v1, v2, ... ) AND key2 in ( v3, v4, ... ) )
    - a list of dicts is the OR of the individual conditional dicts
  """
  if isinstance( negativeCond, dict ):
    return self.__generateNotDictSQL( tableDict, negativeCond )
  if isinstance( negativeCond, ( list, tuple ) ):
    orParts = [ self.__generateNotDictSQL( tableDict, condDict ) for condDict in negativeCond ]
    return " ( %s )" % " OR ".join( orParts )
  raise RuntimeError( "negativeCond has to be either a list or a dict or a tuple, and it's %s" % type( negativeCond ) )
def __generateNotDictSQL( self, tableDict, negativeCond ):
  """ Generate the negative sql condition from a standard condition dict
      not ( cond1 and cond2 ) = ( not cond1 or not cond2 )
      For instance: { 'Site': 'S1', 'JobType': [ 'T1', 'T2' ] }
        ( not 'S1' in Sites or ( not 'T1' in JobType and not 'T2' in JobType ) )
      S2 T1 -> not False or ( not True and not False ) -> True or ... -> True -> Eligible
      S1 T3 -> not True or ( not False and not False ) -> False or (True and True ) -> True -> Eligible
      S1 T1 -> not True or ( not True and not False ) -> False or ( False and True ) -> False -> Nop
  """
  condList = []
  for field in negativeCond:
    # Bug fix: normalize scalars to a 1-tuple for BOTH branches. Previously the
    # single-value branch iterated the raw value, so a bare string produced one
    # bogus "!=" clause per character.
    valList = negativeCond[ field ]
    if not isinstance( valList, ( list, tuple ) ):
      valList = ( valList, )
    if field in multiValueMatchFields:
      fullTableN = '`tq_TQTo%ss`' % field
      # All excluded values must be absent from the TQ's multi-value table
      subList = []
      for value in valList:
        value = self._escapeString( value )[ 'Value' ]
        sql = "%s NOT IN ( SELECT %s.Value FROM %s WHERE %s.TQId = tq.TQId )" % ( value,
                                                                                  fullTableN,
                                                                                  fullTableN,
                                                                                  fullTableN )
        subList.append( sql )
      condList.append( "( %s )" % " AND ".join( subList ) )
    elif field in singleValueDefFields:
      for value in valList:
        value = self._escapeString( value )[ 'Value' ]
        sql = "%s != tq.%s " % ( value, field )
        condList.append( sql )
  return "( %s )" % " OR ".join( condList )
def __generateTablesName( self, sqlTables, field ):
  """
  Register (once) the tq_TQTo<field>s table in sqlTables and return its
  alias together with its back-quoted table name.
  """
  fullTableName = 'tq_TQTo%ss' % field
  quotedName = "`%s`" % fullTableName
  if fullTableName in sqlTables:
    return sqlTables[ fullTableName ], quotedName
  alias = field.lower()
  sqlTables[ fullTableName ] = alias
  return alias, quotedName
def __generateTQMatchSQL( self, tqMatchDict, numQueuesToGet = 1, negativeCond = {} ):
  """
  Generate the SQL needed to match a task queue

  :param tqMatchDict: resource description (owner, CPUTime, multi-value fields...)
  :param numQueuesToGet: LIMIT of the final query; 0/False means no limit
  :param negativeCond: conditions the TQ must NOT fulfill
  :return: S_OK( sqlString ) selecting ( TQId, OwnerDN, OwnerGroup )
  """
  #Only enabled TQs
  sqlCondList = []
  sqlTables = { "tq_TaskQueues" : "tq" }
  #If OwnerDN and OwnerGroup are defined only use those combinations that make sense
  if 'OwnerDN' in tqMatchDict and 'OwnerGroup' in tqMatchDict:
    groups = tqMatchDict[ 'OwnerGroup' ]
    if not isinstance(groups, (list, tuple)):
      groups = [ groups ]
    dns = tqMatchDict[ 'OwnerDN' ]
    if not isinstance( dns, (list, tuple) ):
      dns = [ dns ]
    ownerConds = []
    for group in groups:
      # Groups with JobSharing match on the group alone; others need DN+group
      if Properties.JOB_SHARING in CS.getPropertiesForGroup( group.replace( '"', "" ) ):
        ownerConds.append( "tq.OwnerGroup = %s" % group )
      else:
        for dn in dns:
          ownerConds.append( "( tq.OwnerDN = %s AND tq.OwnerGroup = %s )" % ( dn, group ) )
    sqlCondList.append( " OR ".join( ownerConds ) )
  else:
    #If not both are defined, just add the ones that are defined
    for field in ( 'OwnerGroup', 'OwnerDN' ):
      if field in tqMatchDict:
        sqlCondList.append( self.__generateSQLSubCond( "tq.%s = %%s" % field,
                                                       tqMatchDict[ field ] ) )
  #Type of single value conditions
  for field in ( 'CPUTime', 'Setup' ):
    if field in tqMatchDict:
      if field == 'CPUTime':
        # CPUTime is an upper bound, not an equality
        sqlCondList.append( self.__generateSQLSubCond( "tq.%s <= %%s" % field, tqMatchDict[ field ] ) )
      else:
        sqlCondList.append( self.__generateSQLSubCond( "tq.%s = %%s" % field, tqMatchDict[ field ] ) )
  #Match multi value fields
  for field in multiValueMatchFields:
    #It has to be %ss , with an 's' at the end because the columns names
    # are plural and match options are singular
    if field in tqMatchDict and tqMatchDict[ field ]:
      _, fullTableN = self.__generateTablesName( sqlTables, field )
      sqlMultiCondList = []
      # if field != 'GridCE' or 'Site' in tqMatchDict:
      # Jobs for masked sites can be matched if they specified a GridCE
      # Site is removed from tqMatchDict if the Site is mask. In this case we want
      # that the GridCE matches explicitly so the COUNT can not be 0. In this case we skip this
      # condition
      # A TQ with NO entries for this field matches anything offered
      sqlMultiCondList.append( "( SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId = tq.TQId ) = 0" % ( fullTableN,
                                                                                                    fullTableN,
                                                                                                    fullTableN ) )
      rsql = None
      if field in tagMatchFields:
        if tqMatchDict[field] != '"Any"':
          csql = self.__generateTagSQLSubCond( fullTableN, tqMatchDict[field] )
        # NOTE(review): if the value IS '"Any"', csql keeps its value from a
        # previous loop iteration or is undefined — confirm intended behavior
        # Add required tag condition
        # NOTE(review): this inner loop reuses the outer loop variable `field`,
        # clobbering it for the bannedJobMatchFields / Banned%s checks below —
        # looks unintentional, confirm against upstream
        for field in tagMatchFields:
          fieldName = "Required%s" % field
          requiredTags = tqMatchDict.get( fieldName, '' )
          if requiredTags:
            rsql = self.__generateRequiredTagSQLSubCond( fullTableN, requiredTags )
      else:
        csql = self.__generateSQLSubCond( "%%s IN ( SELECT %s.Value \
                                                    FROM %s \
                                                    WHERE %s.TQId = tq.TQId )" % ( fullTableN,
                                                                                   fullTableN,
                                                                                   fullTableN ),
                                          tqMatchDict[ field ] )
      sqlMultiCondList.append( csql )
      if rsql is not None:
        sqlCondList.append( rsql )
      sqlCondList.append( "( %s )" % " OR ".join( sqlMultiCondList ) )
      #In case of Site, check it's not in job banned sites
      if field in bannedJobMatchFields:
        fullTableN = '`tq_TQToBanned%ss`' % field
        csql = self.__generateSQLSubCond( "%%s not in ( SELECT %s.Value \
                                                        FROM %s \
                                                        WHERE %s.TQId = tq.TQId )" % ( fullTableN,
                                                                                       fullTableN,
                                                                                       fullTableN ),
                                          tqMatchDict[ field ], boolOp = 'OR' )
        sqlCondList.append( csql )
    #Resource banning
    bannedField = "Banned%s" % field
    if bannedField in tqMatchDict and tqMatchDict[ bannedField ]:
      fullTableN = '`tq_TQTo%ss`' % field
      csql = self.__generateSQLSubCond( "%%s not in ( SELECT %s.Value \
                                                      FROM %s \
                                                      WHERE %s.TQId = tq.TQId )" % ( fullTableN,
                                                                                     fullTableN,
                                                                                     fullTableN ),
                                        tqMatchDict[ bannedField ], boolOp = 'OR' )
      sqlCondList.append( csql )
  #For certain fields, the requirement is strict. If it is not in the tqMatchDict, the job cannot require it
  for field in strictRequireMatchFields:
    if field in tqMatchDict and tqMatchDict[field]:
      continue
    fullTableN = '`tq_TQTo%ss`' % field
    sqlCondList.append( "( SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId = tq.TQId ) = 0" % ( fullTableN, fullTableN, fullTableN ) )
  # Add extra conditions
  if negativeCond:
    sqlCondList.append( self.__generateNotSQL( sqlTables, negativeCond ) )
  #Generate the final query string
  tqSqlCmd = "SELECT tq.TQId, tq.OwnerDN, tq.OwnerGroup FROM `tq_TaskQueues` tq WHERE %s" % ( " AND ".join( sqlCondList ) )
  #Apply priorities
  tqSqlCmd = "%s ORDER BY RAND() / tq.Priority ASC" % tqSqlCmd
  #Do we want a limit?
  if numQueuesToGet:
    tqSqlCmd = "%s LIMIT %s" % ( tqSqlCmd, numQueuesToGet )
  return S_OK( tqSqlCmd )
def __generateTagSQLSubCond( self, tableName, tagMatchList ):
  """
  SQL condition requiring that ALL tag values attached to a TQ are contained
  in tagMatchList: count(all TQ values) must equal count(TQ values within the
  allowed list).
  """
  countAll = "SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId=tq.TQId" % ( tableName, tableName, tableName )
  if isinstance( tagMatchList, ( list, tuple ) ):
    allowedValues = ','.join( [ "%s" % v for v in tagMatchList ] )
    countAllowed = "%s AND %s.Value in ( %s )" % ( countAll, tableName, allowedValues )
  else:
    countAllowed = "%s AND %s.Value=%s" % ( countAll, tableName, tagMatchList )
  return '( ' + countAll + ' ) = (' + countAllowed + ' )'
def __generateRequiredTagSQLSubCond( self, tableName, tagMatchList ):
  """
  SQL condition requiring that the TQ carries ALL the tags requested by the
  resource: count(TQ values within tagMatchList) must equal the number of
  requested tags.
  """
  countSQL = "SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId=tq.TQId" % ( tableName, tableName, tableName )
  if isinstance( tagMatchList, ( list, tuple ) ):
    countSQL = countSQL + " AND %s.Value in ( %s )" % ( tableName, ','.join( [ "%s" % v for v in tagMatchList] ) )
    expectedTags = len( tagMatchList )
  else:
    countSQL = countSQL + " AND %s.Value=%s" % ( tableName, tagMatchList )
    expectedTags = 1
  return '( %s ) = %s' % ( countSQL, expectedTags )
def deleteJob( self, jobId, connObj = False ):
  """
  Delete a job from the task queues
  Return S_OK( True/False ) / S_ERROR

  True means the job row was actually removed by this call; False means it
  was not in any TQ (or another matcher removed it first).
  """
  if not connObj:
    retVal = self._getConnection()
    if not retVal[ 'OK' ]:
      return S_ERROR( "Can't delete job: %s" % retVal[ 'Message' ] )
    connObj = retVal[ 'Value' ]
  # Find the TQ (and its owner) currently holding the job
  retVal = self._query( "SELECT t.TQId, t.OwnerDN, t.OwnerGroup FROM `tq_TaskQueues` t, `tq_Jobs` j WHERE j.JobId = %s AND t.TQId = j.TQId" % jobId, conn = connObj )
  if not retVal[ 'OK' ]:
    return S_ERROR( "Could not get job from task queue %s: %s" % ( jobId, retVal[ 'Message' ] ) )
  data = retVal[ 'Value' ]
  if not data:
    return S_OK( False )
  tqId, tqOwnerDN, tqOwnerGroup = data[0]
  self.log.info( "Deleting job %s" % jobId )
  retVal = self._update( "DELETE FROM `tq_Jobs` WHERE JobId = %s" % jobId, conn = connObj )
  if not retVal[ 'OK' ]:
    return S_ERROR( "Could not delete job from task queue %s: %s" % ( jobId, retVal[ 'Message' ] ) )
  if retVal['Value'] == 0:
    #No job deleted
    return S_OK( False )
  #Always return S_OK() because job has already been taken out from the TQ
  # Schedule a delayed cleanup of the (possibly now empty) task queue
  self.__deleteTQWithDelay.add( tqId, 300, ( tqId, tqOwnerDN, tqOwnerGroup ) )
  return S_OK( True )
def getTaskQueueForJob( self, jobId, connObj = False ):
  """
  Return TaskQueue for a given Job
  Return S_OK( TaskQueueID ) / S_ERROR

  Note: the scalar TQId is returned, not a list.
  """
  if not connObj:
    retVal = self._getConnection()
    if not retVal[ 'OK' ]:
      return S_ERROR( "Can't get TQ for job: %s" % retVal[ 'Message' ] )
    connObj = retVal[ 'Value' ]
  retVal = self._query( 'SELECT TQId FROM `tq_Jobs` WHERE JobId = %s ' % jobId, conn = connObj )
  if not retVal[ 'OK' ]:
    return retVal
  if not retVal['Value']:
    return S_ERROR( 'Not in TaskQueues' )
  return S_OK( retVal['Value'][0][0] )
def getTaskQueueForJobs( self, jobIDs, connObj = False ):
  """
  Return TaskQueues for a given list of Jobs

  :param jobIDs: iterable of job ids
  :return: S_OK( { jobID : tqID } ) / S_ERROR( 'Not in TaskQueues' ) when none found
  """
  if not connObj:
    retVal = self._getConnection()
    if not retVal[ 'OK' ]:
      return S_ERROR( "Can't get TQs for a job list: %s" % retVal[ 'Message' ] )
    connObj = retVal[ 'Value' ]
  jobString = ','.join( [ str( x ) for x in jobIDs ] )
  retVal = self._query( 'SELECT JobId,TQId FROM `tq_Jobs` WHERE JobId in (%s) ' % jobString, conn = connObj )
  if not retVal[ 'OK' ]:
    return retVal
  if not retVal['Value']:
    return S_ERROR( 'Not in TaskQueues' )
  resultDict = {}
  for jobID, TQID in retVal['Value']:
    resultDict[int( jobID )] = int( TQID )
  return S_OK( resultDict )
def __getOwnerForTaskQueue( self, tqId, connObj = False ):
  """
  Fetch ( OwnerDN, OwnerGroup ) of a task queue.

  :return: S_OK( ( dn, group ) ), S_OK( False ) if the TQ does not exist, or S_ERROR
  """
  retVal = self._query( "SELECT OwnerDN, OwnerGroup from `tq_TaskQueues` WHERE TQId=%s" % tqId, conn = connObj )
  if not retVal[ 'OK' ]:
    return retVal
  data = retVal[ 'Value' ]
  if len( data ) == 0:
    return S_OK( False )
  return S_OK( retVal[ 'Value' ][0] )
def __deleteTQIfEmpty( self, args ):
  """
  Callback used by the delayed-deletion cache: try up to 3 times to delete
  the task queue if it turned out to be empty, logging each failure.

  :param args: tuple ( tqId, tqOwnerDN, tqOwnerGroup )
  """
  tqId, tqOwnerDN, tqOwnerGroup = args
  for _ in range( 3 ):
    result = self.deleteTaskQueueIfEmpty( tqId, tqOwnerDN, tqOwnerGroup )
    if result[ 'OK' ]:
      return
    gLogger.error( "Could not delete TQ %s: %s" % ( tqId, result[ 'Message' ] ) )
def deleteTaskQueueIfEmpty( self, tqId, tqOwnerDN = False, tqOwnerGroup = False, connObj = False ):
  """
  Try to delete a task queue if it is empty (no jobs) and enabled.

  :param tqId: id of the task queue
  :param tqOwnerDN: owner DN; looked up in the DB if not given
  :param tqOwnerGroup: owner group; looked up in the DB if not given
  :param connObj: optional DB connection to reuse
  :return: S_OK( True ) if deleted, S_OK( False ) otherwise, S_ERROR on DB failure
  """
  if not connObj:
    retVal = self._getConnection()
    if not retVal[ 'OK' ]:
      # Bug fix: the message said "Can't insert job" (copy-paste from insertJob)
      return S_ERROR( "Can't delete task queue: %s" % retVal[ 'Message' ] )
    connObj = retVal[ 'Value' ]
  if not tqOwnerDN or not tqOwnerGroup:
    retVal = self.__getOwnerForTaskQueue( tqId, connObj = connObj )
    if not retVal[ 'OK' ]:
      return retVal
    data = retVal[ 'Value' ]
    if not data:
      # Unknown TQ: nothing to delete
      return S_OK( False )
    tqOwnerDN, tqOwnerGroup = data
  # Delete only when enabled and with no jobs attached
  sqlCmd = "DELETE FROM `tq_TaskQueues` WHERE Enabled >= 1 AND `tq_TaskQueues`.TQId = %s" % tqId
  sqlCmd = "%s AND `tq_TaskQueues`.TQId not in ( SELECT DISTINCT TQId from `tq_Jobs` )" % sqlCmd
  retVal = self._update( sqlCmd, conn = connObj )
  if not retVal[ 'OK' ]:
    return S_ERROR( "Could not delete task queue %s: %s" % ( tqId, retVal[ 'Message' ] ) )
  delTQ = retVal[ 'Value' ]
  if delTQ > 0:
    # Clean the multi-value satellite tables and rebalance the owner's shares
    for mvField in multiValueDefFields:
      retVal = self._update( "DELETE FROM `tq_TQTo%s` WHERE TQId = %s" % ( mvField, tqId ), conn = connObj )
      if not retVal[ 'OK' ]:
        return retVal
    self.recalculateTQSharesForEntity( tqOwnerDN, tqOwnerGroup, connObj = connObj )
    self.log.info( "Deleted empty and enabled TQ %s" % tqId )
    return S_OK( True )
  return S_OK( False )
def deleteTaskQueue( self, tqId, tqOwnerDN = False, tqOwnerGroup = False, connObj = False ):
  """
  Delete a task queue unconditionally, together with its jobs and its
  multi-value satellite records.

  :param tqId: id of the task queue
  :param tqOwnerDN: owner DN; looked up in the DB if not given
  :param tqOwnerGroup: owner group; looked up in the DB if not given
  :param connObj: optional DB connection to reuse
  :return: S_OK( True ) if the TQ row was removed, S_OK( False ) otherwise, S_ERROR on DB failure
  """
  self.log.info( "Deleting TQ %s" % tqId )
  if not connObj:
    retVal = self._getConnection()
    if not retVal[ 'OK' ]:
      # Bug fix: the message said "Can't insert job" (copy-paste from insertJob)
      return S_ERROR( "Can't delete task queue: %s" % retVal[ 'Message' ] )
    connObj = retVal[ 'Value' ]
  if not tqOwnerDN or not tqOwnerGroup:
    retVal = self.__getOwnerForTaskQueue( tqId, connObj = connObj )
    if not retVal[ 'OK' ]:
      return retVal
    data = retVal[ 'Value' ]
    if not data:
      # Unknown TQ: nothing to delete
      return S_OK( False )
    tqOwnerDN, tqOwnerGroup = data
  sqlCmd = "DELETE FROM `tq_TaskQueues` WHERE `tq_TaskQueues`.TQId = %s" % tqId
  retVal = self._update( sqlCmd, conn = connObj )
  if not retVal[ 'OK' ]:
    return S_ERROR( "Could not delete task queue %s: %s" % ( tqId, retVal[ 'Message' ] ) )
  delTQ = retVal[ 'Value' ]
  sqlCmd = "DELETE FROM `tq_Jobs` WHERE `tq_Jobs`.TQId = %s" % tqId
  retVal = self._update( sqlCmd, conn = connObj )
  if not retVal[ 'OK' ]:
    return S_ERROR( "Could not delete task queue %s: %s" % ( tqId, retVal[ 'Message' ] ) )
  for field in multiValueDefFields:
    retVal = self._update( "DELETE FROM `tq_TQTo%s` WHERE TQId = %s" % ( field, tqId ), conn = connObj )
    if not retVal[ 'OK' ]:
      return retVal
  if delTQ > 0:
    # Rebalance shares only when a TQ row was actually removed
    self.recalculateTQSharesForEntity( tqOwnerDN, tqOwnerGroup, connObj = connObj )
    return S_OK( True )
  return S_OK( False )
def getMatchingTaskQueues( self, tqMatchDict, negativeCond = False ):
  """
  Compatibility alias: exposes the same method name as the Matcher.
  Delegates to retrieveTaskQueuesThatMatch.
  """
  return self.retrieveTaskQueuesThatMatch( tqMatchDict, negativeCond = negativeCond )
def getNumTaskQueues( self ):
  """
  Count the task queues currently defined in the system.

  :return: S_OK( count ) / S_ERROR
  """
  retVal = self._query( "SELECT COUNT( TQId ) FROM `tq_TaskQueues`" )
  if not retVal[ 'OK' ]:
    return retVal
  return S_OK( retVal[ 'Value' ][0][0] )
def retrieveTaskQueuesThatMatch( self, tqMatchDict, negativeCond = False ):
  """
  Get the full definition of every task queue that matches the given
  resource description (no LIMIT applied).
  """
  result = self.matchAndGetTaskQueue( tqMatchDict, numQueuesToGet = 0, negativeCond = negativeCond )
  if not result[ 'OK' ]:
    return result
  matchedIds = [ tqTuple[0] for tqTuple in result[ 'Value' ] ]
  return self.retrieveTaskQueues( matchedIds )
def retrieveTaskQueues( self, tqIdList = False ):
  """
  Get all the task queues

  :param tqIdList: list of TQ ids to retrieve; False means all TQs
  :return: S_OK( { tqId : { 'Priority', 'Jobs', <single-value fields>, <multi-value fields> } } ) / S_ERROR
  """
  sqlSelectEntries = [ "`tq_TaskQueues`.TQId", "`tq_TaskQueues`.Priority", "COUNT( `tq_Jobs`.TQId )" ]
  sqlGroupEntries = [ "`tq_TaskQueues`.TQId", "`tq_TaskQueues`.Priority" ]
  for field in singleValueDefFields:
    sqlSelectEntries.append( "`tq_TaskQueues`.%s" % field )
    sqlGroupEntries.append( "`tq_TaskQueues`.%s" % field )
  sqlCmd = "SELECT %s FROM `tq_TaskQueues`, `tq_Jobs`" % ", ".join( sqlSelectEntries )
  sqlTQCond = ""
  if tqIdList is not False:
    if len( tqIdList ) == 0:
      # Empty id list: nothing to retrieve
      return S_OK( {} )
    sqlTQCond += " AND `tq_TaskQueues`.TQId in ( %s )" % ", ".join( [ str( id_ ) for id_ in tqIdList ] )
  sqlCmd = "%s WHERE `tq_TaskQueues`.TQId = `tq_Jobs`.TQId %s GROUP BY %s" % ( sqlCmd,
                                                                               sqlTQCond,
                                                                               ", ".join( sqlGroupEntries ) )
  retVal = self._query( sqlCmd )
  if not retVal[ 'OK' ]:
    return S_ERROR( "Can't retrieve task queues info: %s" % retVal[ 'Message' ] )
  tqData = {}
  for record in retVal[ 'Value' ]:
    tqId = record[0]
    tqData[ tqId ] = { 'Priority' : record[1], 'Jobs' : record[2] }
    record = record[3:]
    for iP in range( len( singleValueDefFields ) ):
      tqData[ tqId ][ singleValueDefFields[ iP ] ] = record[ iP ]
  tqNeedCleaning = False
  for field in multiValueDefFields:
    table = "`tq_TQTo%s`" % field
    sqlCmd = "SELECT %s.TQId, %s.Value FROM %s" % ( table, table, table )
    retVal = self._query( sqlCmd )
    if not retVal[ 'OK' ]:
      # Bug fix: the format string was "field % info: %s" — "% i" is a
      # space-flagged integer conversion and raised TypeError on a string
      # field name while trying to report the error
      return S_ERROR( "Can't retrieve task queues field %s info: %s" % ( field, retVal[ 'Message' ] ) )
    for record in retVal[ 'Value' ]:
      tqId = record[0]
      value = record[1]
      if tqId not in tqData:
        if tqIdList is False or tqId in tqIdList:
          # Orphaned record in a satellite table: schedule a cleanup
          self.log.warn( "Task Queue %s is defined in field %s but does not exist, triggering a cleaning" % ( tqId, field ) )
          tqNeedCleaning = True
      else:
        if field not in tqData[ tqId ]:
          tqData[ tqId ][ field ] = []
        tqData[ tqId ][ field ].append( value )
  if tqNeedCleaning:
    self.cleanOrphanedTaskQueues()
  return S_OK( tqData )
def __updateGlobalShares( self ):
  """
  Update internal structure for shares

  Refresh the in-memory per-group shares from the Registry and, when the
  shares corrector is enabled, apply corrections restricted to the groups
  that currently have task queues.
  """
  #Update group shares
  self.__groupShares = self.getGroupShares()
  #Apply corrections if enabled
  if self.isSharesCorrectionEnabled():
    result = self.getGroupsInTQs()
    if not result[ 'OK' ]:
      self.log.error( "Could not get groups in the TQs", result[ 'Message' ] )
      # Bug fix: previously the code fell through and dereferenced
      # result[ 'Value' ] of a failed call, raising KeyError
      return
    activeGroups = result[ 'Value' ]
    newShares = {}
    for group in activeGroups:
      if group in self.__groupShares:
        newShares[ group ] = self.__groupShares[ group ]
    newShares = self.__sharesCorrector.correctShares( newShares )
    for group in self.__groupShares:
      if group in newShares:
        self.__groupShares[ group ] = newShares[ group ]
def recalculateTQSharesForAll( self ):
  """
  Recalculate all priorities for TQ's

  Iterates over every owner group present in the task queues and
  recalculates shares for all owners ("all") in each group.

  :return: S_OK() / S_ERROR
  """
  if self.isSharesCorrectionEnabled():
    self.log.info( "Updating correctors state" )
    self.__sharesCorrector.update()
  self.__updateGlobalShares()
  self.log.info( "Recalculating shares for all TQs" )
  retVal = self._getConnection()
  if not retVal[ 'OK' ]:
    # Bug fix: the message said "Can't insert job" (copy-paste from insertJob)
    return S_ERROR( "Can't connect to DB: %s" % retVal[ 'Message' ] )
  connObj = retVal[ 'Value' ]
  # Use the connection just obtained (it was previously fetched but ignored)
  result = self._query( "SELECT DISTINCT( OwnerGroup ) FROM `tq_TaskQueues`", conn = connObj )
  if not result[ 'OK' ]:
    return result
  for group in [ r[0] for r in result[ 'Value' ] ]:
    self.recalculateTQSharesForEntity( "all", group )
  return S_OK()
def recalculateTQSharesForEntity( self, userDN, userGroup, connObj = False ):
  """
  Recalculate the shares for a userDN/userGroup combo

  :param userDN: owner DN (or "all" to refresh every owner in the group)
  :param userGroup: owner group whose share is split among its owners
  :param connObj: optional DB connection to reuse
  """
  self.log.info( "Recalculating shares for %s@%s TQs" % ( userDN, userGroup ) )
  if userGroup in self.__groupShares:
    share = self.__groupShares[ userGroup ]
  else:
    share = float( DEFAULT_GROUP_SHARE )
  if Properties.JOB_SHARING in CS.getPropertiesForGroup( userGroup ):
    #If group has JobSharing just set prio for that entry, userDN is irrelevant
    return self.__setPrioritiesForEntity( userDN, userGroup, share, connObj = connObj )
  selSQL = "SELECT OwnerDN, COUNT(OwnerDN) FROM `tq_TaskQueues` WHERE OwnerGroup='%s' GROUP BY OwnerDN" % ( userGroup )
  result = self._query( selSQL, conn = connObj )
  if not result[ 'OK' ]:
    return result
  #Get owners in this group and the amount of times they appear
  data = [ ( r[0], r[1] ) for r in result[ 'Value' ] if r ]
  numOwners = len( data )
  #If there are no owners there is nothing to recalculate
  if numOwners == 0:
    return S_OK()
  #Split the share amongst the number of owners
  share /= numOwners
  entitiesShares = dict( [ ( row[0], share ) for row in data ] )
  #If corrector is enabled let it work it's magic
  if self.isSharesCorrectionEnabled():
    entitiesShares = self.__sharesCorrector.correctShares( entitiesShares, group = userGroup )
  #Keep updating
  owners = dict( data )
  #IF the user is already known and has more than 1 tq, the rest of the users don't need to be modified
  #(The number of owners didn't change)
  if userDN in owners and owners[ userDN ] > 1:
    return self.__setPrioritiesForEntity( userDN, userGroup, entitiesShares[ userDN ], connObj = connObj )
  #Oops the number of owners may have changed so we recalculate the prio for all owners in the group
  # NOTE: the loop deliberately rebinds userDN to each owner in turn
  for userDN in owners:
    self.__setPrioritiesForEntity( userDN, userGroup, entitiesShares[ userDN ], connObj = connObj )
  return S_OK()
def __setPrioritiesForEntity( self, userDN, userGroup, share, connObj = False, consolidationFunc = "AVG" ):
  """
  Set the priority for a userDN/userGroup combo given a splitted share

  :param share: share assigned to this entity, to be distributed over its TQs
  :param consolidationFunc: 'AVG' or 'SUM' of the jobs' RealPriority per TQ
  """
  self.log.info( "Setting priorities to %s@%s TQs" % ( userDN, userGroup ) )
  tqCond = [ "t.OwnerGroup='%s'" % userGroup ]
  allowBgTQs = gConfig.getValue( "/Registry/Groups/%s/AllowBackgroundTQs" % userGroup, False )
  if Properties.JOB_SHARING not in CS.getPropertiesForGroup( userGroup ):
    # Without JobSharing the selection is restricted to this owner's DN
    res = self._escapeString( userDN )
    if not res['OK']:
      return res
    userDN = res['Value']
    tqCond.append( "t.OwnerDN= %s " % userDN )
  tqCond.append( "t.TQId = j.TQId" )
  if consolidationFunc == 'AVG':
    selectSQL = "SELECT j.TQId, SUM( j.RealPriority )/COUNT(j.RealPriority) FROM `tq_TaskQueues` t, `tq_Jobs` j WHERE "
  elif consolidationFunc == 'SUM':
    selectSQL = "SELECT j.TQId, SUM( j.RealPriority ) FROM `tq_TaskQueues` t, `tq_Jobs` j WHERE "
  else:
    return S_ERROR( "Unknown consolidation func %s for setting priorities" % consolidationFunc )
  selectSQL += " AND ".join( tqCond )
  selectSQL += " GROUP BY t.TQId"
  result = self._query( selectSQL, conn = connObj )
  if not result[ 'OK' ]:
    return result
  # tqDict maps TQId -> consolidated job priority
  tqDict = dict( result[ 'Value' ] )
  if len( tqDict ) == 0:
    return S_OK()
  #Calculate Sum of priorities
  totalPrio = 0
  for k in tqDict:
    if tqDict[k] > 0.1 or not allowBgTQs:
      totalPrio += tqDict[ k ]
  #Update prio for each TQ
  for tqId in tqDict:
    if tqDict[ tqId ] > 0.1 or not allowBgTQs:
      prio = ( share / totalPrio ) * tqDict[ tqId ]
    else:
      # Background TQ: pin it at the minimum share
      prio = TQ_MIN_SHARE
    prio = max( prio, TQ_MIN_SHARE )
    tqDict[ tqId ] = prio
  #Generate groups of TQs that will have the same prio=sum(prios) maomenos
  result = self.retrieveTaskQueues( list( tqDict ) )
  if not result[ 'OK' ]:
    return result
  allTQsData = result[ 'Value' ]
  tqGroups = {}
  for tqid in allTQsData:
    tqData = allTQsData[ tqid ]
    # Drop the fields that must not influence the grouping key
    for field in ( 'Jobs', 'Priority' ) + priorityIgnoredFields:
      if field in tqData:
        tqData.pop( field )
    # TQs with the same remaining definition end up in the same group
    tqHash = []
    for f in sorted( tqData ):
      tqHash.append( "%s:%s" % ( f, tqData[ f ] ) )
    tqHash = "|".join( tqHash )
    if tqHash not in tqGroups:
      tqGroups[ tqHash ] = []
    tqGroups[ tqHash ].append( tqid )
  tqGroups = [ tqGroups[ td ] for td in tqGroups ]
  #Do the grouping
  for tqGroup in tqGroups:
    totalPrio = 0
    if len( tqGroup ) < 2:
      continue
    for tqid in tqGroup:
      totalPrio += tqDict[ tqid ]
    for tqid in tqGroup:
      tqDict[ tqid ] = totalPrio
  #Group by priorities
  prioDict = {}
  for tqId in tqDict:
    prio = tqDict[ tqId ]
    if prio not in prioDict:
      prioDict[ prio ] = []
    prioDict[ prio ].append( tqId )
  #Execute updates
  for prio in prioDict:
    tqList = ", ".join( [ str( tqId ) for tqId in prioDict[ prio ] ] )
    updateSQL = "UPDATE `tq_TaskQueues` SET Priority=%.4f WHERE TQId in ( %s )" % ( prio, tqList )
    self._update( updateSQL, conn = connObj )
  return S_OK()
def getGroupShares( self ):
  """
  Return the configured JobShare of every group in the Registry.

  :return: dict { groupName : share }
  """
  result = gConfig.getSections( "/Registry/Groups" )
  groups = result[ 'Value' ] if result[ 'OK' ] else []
  return dict( ( group,
                 gConfig.getValue( "/Registry/Groups/%s/JobShare" % group, DEFAULT_GROUP_SHARE ) )
               for group in groups )
| Andrew-McNab-UK/DIRAC | WorkloadManagementSystem/DB/TaskQueueDB.py | Python | gpl-3.0 | 53,146 | [
"DIRAC"
] | 6175a54864e0b2e6cbe7a17bd3d8dbe87f220f3ca59c3ee7bb4130d6bef6c643 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkMergeTables(SimpleVTKClassModuleBase):
    # Auto-generated DeVIDE wrapper exposing VTK's vtkMergeTables filter as a
    # network module: two vtkTable inputs, one vtkTable output.
    def __init__(self, module_manager):
        # Delegate all wiring to the generic single-VTK-object mixin;
        # replaceDoc=True substitutes the module docs with the VTK class docs
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkMergeTables(), 'Processing.',
            ('vtkTable', 'vtkTable'), ('vtkTable',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
| fvpolpeta/devide | modules/vtk_basic/vtkMergeTables.py | Python | bsd-3-clause | 488 | [
"VTK"
] | b792a9af5e383a36dfbbcd85b88a2e3647676a9e70b88cddd359521213392fbc |
########################################################################
# $HeadURL$
# File : PilotDirector.py
# Author : Ricardo Graciani
########################################################################
"""
Base PilotDirector class to be inherited by DIRAC and Grid specific PilotDirectors, inherited by MW
specific PilotDirectors if appropriated.
It includes:
- basic configuration functionality
The main difference between DIRAC and Grid Pilot Directors is that in the first case
DIRAC talks directly to the local resources via a DIRAC CE class, while in the second
many CE's are address at the same time via a Grid Resource Broker.
This means that DIRAC direct submission to Grid CE's (CREAM, ...) will be handled by DIRAC Pilot
Director making use of a DIRAC CREAM Computing Element class
"""
__RCSID__ = "$Id$"
import os, random
# Seed the module-level RNG once at import time
random.seed()
import DIRAC
# Some reasonable Defaults
# Pilot script and installer shipped with the local DIRAC installation
DIRAC_PILOT = os.path.join( DIRAC.rootPath, 'DIRAC', 'WorkloadManagementSystem', 'PilotAgent', 'dirac-pilot.py' )
DIRAC_INSTALL = os.path.join( DIRAC.rootPath, 'DIRAC', 'Core', 'scripts', 'dirac-install.py' )
# Extra modules sent along with the pilot
DIRAC_MODULES = [ os.path.join( DIRAC.rootPath, 'DIRAC', 'WorkloadManagementSystem', 'PilotAgent', 'pilotCommands.py' ),
                  os.path.join( DIRAC.rootPath, 'DIRAC', 'WorkloadManagementSystem', 'PilotAgent', 'pilotTools.py' ) ]
# Default installation targets (empty string means "not set")
DIRAC_VERSION = 'Integration'
DIRAC_PROJECT = ''
DIRAC_INSTALLATION = ''
MAX_JOBS_IN_FILLMODE = 2
# Error bookkeeping windows, in seconds
ERROR_CLEAR_TIME = 60 * 60 # 1 hour
ERROR_TICKET_TIME = 60 * 60 # 1 hour (added to the above)
FROM_MAIL = "diracproject@gmail.com"
VIRTUAL_ORGANIZATION = 'dirac'
ENABLE_LISTMATCH = 1
LISTMATCH_DELAY = 5
PRIVATE_PILOT_FRACTION = 0.5
# Canonical error strings reused by the directors
ERROR_PROXY = 'No proxy Available'
ERROR_TOKEN = 'Invalid proxy token request'
ERROR_GENERIC_CREDENTIALS = "Cannot find generic pilot credentials"
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.WorkloadManagementSystem.private.ConfigHelper import findGenericPilotCredentials
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.Client.Helpers import getCSExtensions
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup, getPropertiesForGroup
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.Core.Utilities.DictCache import DictCache
class PilotDirector( object ):
  """
  Base Pilot Director class.

  Derived classes must implement:
    * __init__( self, submitPool ):
        must call the parent class __init__ method and then do its own initialization
    * configure( self, csSection, submitPool ):
        must call the parent class configure method and then do its own configuration
    * _submitPilot( self, workDir, taskQueueDict, pilotOptions, pilotsToSubmit, ceMask,
                    submitPrivatePilot, privateTQ, proxy, pilotsPerJob )
    * _listMatch( self, proxy, jdl, taskQueueID, rb )
    * _getChildrenReferences( self, proxy, parentReference, taskQueueID )

  Derived classes might implement:
    * configureFromSection( self, mySection ):
        to reload from a CS section the additional datamembers they might have defined.

  If additional datamembers are defined, they must:
    - be declared in the __init__
    - be reconfigured in the configureFromSection method by executing
      self.reloadConfiguration( csSection, submitPool ) in their configure method
  """
  # Name of the Grid middleware the director submits to; set by derived
  # classes.  It also selects the CS subsection used by reloadConfiguration
  # and the sub-logger name chosen in __init__.
  gridMiddleware = ''
def __init__( self, submitPool ):
  """
  Define the logger and some defaults.

  :param submitPool: name of the submit pool this director serves; used to
                     pick the sub-logger name and passed to every pilot via
                     the CEDefaults/SubmitPool option.
  """
  # Logger name distinguishes the plain middleware director from a
  # middleware/pool-specific one.
  if submitPool == self.gridMiddleware:
    self.log = gLogger.getSubLogger( '%sPilotDirector' % self.gridMiddleware )
  else:
    self.log = gLogger.getSubLogger( '%sPilotDirector/%s' % ( self.gridMiddleware, submitPool ) )

  # Defaults for the pilot payload; each is overridable from the CS
  # (see configureFromSection).
  self.pilot = DIRAC_PILOT
  self.submitPoolOption = '-o /Resources/Computing/CEDefaults/SubmitPool=%s' % submitPool
  self.extraPilotOptions = []
  self.installVersion = DIRAC_VERSION
  self.installProject = DIRAC_PROJECT
  self.installation = DIRAC_INSTALLATION
  self.pilotExtensionsList = []
  self.virtualOrganization = VIRTUAL_ORGANIZATION
  self.install = DIRAC_INSTALL
  self.extraModules = DIRAC_MODULES
  self.maxJobsInFillMode = MAX_JOBS_IN_FILLMODE
  self.targetGrids = [ self.gridMiddleware ]

  # List-match throttling state; the cache is shared across submissions.
  self.enableListMatch = ENABLE_LISTMATCH
  self.listMatchDelay = LISTMATCH_DELAY
  self.listMatchCache = DictCache()

  self.privatePilotFraction = PRIVATE_PILOT_FRACTION

  # Error-reporting configuration (throttling windows and mail addresses).
  self.errorClearTime = ERROR_CLEAR_TIME
  self.errorTicketTime = ERROR_TICKET_TIME
  self.errorMailAddress = DIRAC.errorMail
  self.alarmMailAddress = DIRAC.alarmMail
  self.mailFromAddress = FROM_MAIL

  # NOTE(review): self.log is always assigned above, so this fallback looks
  # unreachable; kept for safety.
  if not 'log' in self.__dict__:
    self.log = gLogger.getSubLogger( 'PilotDirector' )
  self.log.info( 'Initialized' )
def configure( self, csSection, submitPool ):
  """
  Here goes common configuration for all PilotDirectors.

  Loads values from the given CS section (and its middleware/pool
  subsections via reloadConfiguration), then applies the Operations
  helper defaults for the running Setup, and logs the result.
  """
  self.configureFromSection( csSection )
  self.reloadConfiguration( csSection, submitPool )

  # Get the defaults for the Setup where the Director is running
  opsHelper = Operations()
  # 'Pilot/Version' may be a list; the first entry wins.
  self.installVersion = opsHelper.getValue( cfgPath( 'Pilot', 'Version' ), [ self.installVersion ] )[0]
  self.installProject = opsHelper.getValue( cfgPath( 'Pilot', 'Project' ), self.installProject )
  self.installation = opsHelper.getValue( cfgPath( 'Pilot', 'Installation' ), self.installation )
  self.pilotExtensionsList = opsHelper.getValue( "Pilot/Extensions", self.pilotExtensionsList )

  # Dump the effective configuration to the log for operators.
  self.log.info( '===============================================' )
  self.log.info( 'Configuration:' )
  self.log.info( '' )
  self.log.info( ' Target Grids: ', ', '.join( self.targetGrids ) )
  self.log.info( ' Install script: ', self.install )
  self.log.info( ' Pilot script: ', self.pilot )
  self.log.info( ' Pilot modules', self.extraModules )
  self.log.info( ' Install Ver: ', self.installVersion )
  if self.installProject:
    self.log.info( ' Project: ', self.installProject )
  if self.installation:
    self.log.info( ' Installation: ', self.installation )
  if self.extraPilotOptions:
    self.log.info( ' Extra Options: ', ' '.join( self.extraPilotOptions ) )
  self.log.info( ' ListMatch: ', self.enableListMatch )
  self.log.info( ' Private %: ', self.privatePilotFraction * 100 )
  if self.enableListMatch:
    self.log.info( ' ListMatch Delay:', self.listMatchDelay )
  # Drop stale list-match results on every (re)configuration.
  self.listMatchCache.purgeExpired()
def reloadConfiguration( self, csSection, submitPool ):
  """
  Common Configuration can be overwritten for each GridMiddleware.

  Applies the per-middleware CS subsection first, then the per-SubmitPool
  one, so pool-specific values take precedence over middleware-wide ones.
  """
  for subSection in ( self.gridMiddleware, submitPool ):
    self.configureFromSection( csSection + '/' + subSection )
def configureFromSection( self, mySection ):
  """
  Reload the director's data members from the given CS section, keeping
  the current value of each option as the default when it is absent.
  """
  self.pilot = gConfig.getValue( mySection + '/PilotScript' , self.pilot )
  self.installVersion = gConfig.getValue( mySection + '/Version' , self.installVersion )
  self.extraPilotOptions = gConfig.getValue( mySection + '/ExtraPilotOptions' , self.extraPilotOptions )
  self.install = gConfig.getValue( mySection + '/InstallScript' , self.install )
  # Extra modules from the CS are prepended to the built-in defaults.
  self.extraModules = gConfig.getValue( mySection + '/ExtraPilotModules' , [] ) + self.extraModules
  self.installProject = gConfig.getValue( mySection + '/Project' , self.installProject )
  self.installation = gConfig.getValue( mySection + '/Installation' , self.installation )
  self.maxJobsInFillMode = gConfig.getValue( mySection + '/MaxJobsInFillMode' , self.maxJobsInFillMode )
  self.targetGrids = gConfig.getValue( mySection + '/TargetGrids' , self.targetGrids )

  self.enableListMatch = gConfig.getValue( mySection + '/EnableListMatch' , self.enableListMatch )
  self.listMatchDelay = gConfig.getValue( mySection + '/ListMatchDelay' , self.listMatchDelay )
  self.errorClearTime = gConfig.getValue( mySection + '/ErrorClearTime' , self.errorClearTime )
  self.errorTicketTime = gConfig.getValue( mySection + '/ErrorTicketTime' , self.errorTicketTime )
  self.errorMailAddress = gConfig.getValue( mySection + '/ErrorMailAddress' , self.errorMailAddress )
  self.alarmMailAddress = gConfig.getValue( mySection + '/AlarmMailAddress' , self.alarmMailAddress )
  self.mailFromAddress = gConfig.getValue( mySection + '/MailFromAddress' , self.mailFromAddress )
  self.privatePilotFraction = gConfig.getValue( mySection + '/PrivatePilotFraction' , self.privatePilotFraction )

  # VO resolution: explicit CS value wins, then a Registry lookup,
  # finally the previously configured value.
  # NOTE(review): the 'NonExistingGroup' argument looks like a placeholder
  # relying on getVOForGroup's default behavior -- confirm against Registry.
  virtualOrganization = gConfig.getValue( mySection + '/VirtualOrganization' , '' )
  if not virtualOrganization:
    virtualOrganization = getVOForGroup( 'NonExistingGroup' )
    if not virtualOrganization:
      virtualOrganization = self.virtualOrganization
  self.virtualOrganization = virtualOrganization
def _resolveCECandidates( self, taskQueueDict ):
  """
  Return a list of CEs (host names) for this TaskQueue.

  Resolution order: CEs explicitly requested by the TaskQueue, otherwise
  the usable-site mask filtered by the TaskQueue's banned/required sites,
  mapped to eligible computing elements.  Returns an empty list when no
  candidate can be found.
  """
  # assume user knows what they're doing and avoid site mask e.g. sam jobs
  if 'GridCEs' in taskQueueDict and taskQueueDict['GridCEs']:
    self.log.info( 'CEs requested by TaskQueue %s:' % taskQueueDict['TaskQueueID'],
                   ', '.join( taskQueueDict['GridCEs'] ) )
    return taskQueueDict['GridCEs']

  # Get the mask
  siteStatus = SiteStatus()
  ret = siteStatus.getUsableSites( 'ComputingAccess' )
  if not ret['OK']:
    self.log.error( 'Can not retrieve site Mask from DB:', ret['Message'] )
    return []

  usableSites = ret['Value']
  if not usableSites:
    self.log.error( 'Site mask is empty' )
    return []

  self.log.verbose( 'Site Mask: %s' % ', '.join( usableSites ) )

  # remove banned sites from siteMask
  if 'BannedSites' in taskQueueDict:
    for site in taskQueueDict['BannedSites']:
      if site in usableSites:
        usableSites.remove( site )
        self.log.verbose( 'Removing banned site %s from site Mask' % site )

  # remove from the mask if a Site is given
  siteMask = [ site for site in usableSites if 'Sites' not in taskQueueDict or site in taskQueueDict['Sites'] ]

  if not siteMask:
    # pilot can not be submitted
    self.log.info( 'No Valid Site Candidate in Mask for TaskQueue %s' % taskQueueDict['TaskQueueID'] )
    return []

  self.log.info( 'Site Candidates for TaskQueue %s:' % taskQueueDict['TaskQueueID'], ', '.join( siteMask ) )

  # Get CE's associates to the given site Names
  ceMask = []

  # Only gLite-submittable LCG/CREAM CEs are considered here.
  resources = Resources( vo = self.virtualOrganization )
  result = resources.getEligibleResources( 'Computing', {'Site':siteMask,
                                                         'SubmissionMode':'gLite',
                                                         'CEType':['LCG','CREAM']} )
  if not result['OK']:
    self.log.error( "Failed to get eligible ce's:", result['Message'] )
    return []

  ces = result['Value']
  # Keep only CEs with a known Host attribute.
  for ce in ces:
    ceHost = resources.getComputingElementValue( ce, 'Host', 'unknown' )
    if ceHost != 'unknown':
      ceMask.append( ceHost )

  if not ceMask:
    self.log.info( 'No CE Candidate found for TaskQueue %s:' % taskQueueDict['TaskQueueID'], ', '.join( siteMask ) )

  self.log.verbose( 'CE Candidates for TaskQueue %s:' % taskQueueDict['TaskQueueID'], ', '.join( ceMask ) )

  return ceMask
def _getPilotOptions( self, taskQueueDict, pilotsToSubmit ):
  """
  Build the command-line option list for the pilots of one TaskQueue.

  Decides between private and generic pilots, acquires credentials/tokens
  for generic ones, and returns
  S_OK( ( pilotOptions, pilotsToSubmit, ownerDN, ownerGroup,
          submitPrivatePilot, privateTQ ) ).
  """
  # Need to limit the maximum number of pilots to submit at once
  # For generic pilots this is limited by the number of use of the tokens and the
  # maximum number of jobs in Filling mode, but for private Jobs we need an extra limitation:
  pilotsToSubmit = max( min( pilotsToSubmit, int( 50 / self.maxJobsInFillMode ) ), 1 )
  pilotOptions = []
  # Randomly promote a generic TQ to private submission with the configured
  # probability, unless the TQ forces generic pilots.
  privateIfGenericTQ = self.privatePilotFraction > random.random()
  privateTQ = ( 'PilotTypes' in taskQueueDict and 'private' in [ t.lower() for t in taskQueueDict['PilotTypes'] ] )
  forceGeneric = 'ForceGeneric' in taskQueueDict
  submitPrivatePilot = ( privateIfGenericTQ or privateTQ ) and not forceGeneric
  if submitPrivatePilot:
    self.log.verbose( 'Submitting private pilots for TaskQueue %s' % taskQueueDict['TaskQueueID'] )
    ownerDN = taskQueueDict['OwnerDN']
    ownerGroup = taskQueueDict['OwnerGroup']
    # User Group requirement
    pilotOptions.append( '-G %s' % taskQueueDict['OwnerGroup'] )
    # check if group allows jobsharing
    ownerGroupProperties = getPropertiesForGroup( ownerGroup )
    if not 'JobSharing' in ownerGroupProperties:
      # Add Owner requirement to pilot
      pilotOptions.append( "-O '%s'" % ownerDN )
    if privateTQ:
      pilotOptions.append( '-o /Resources/Computing/CEDefaults/PilotType=private' )
    maxJobsInFillMode = self.maxJobsInFillMode
  else:
    #For generic jobs we'll submit mixture of generic and private pilots
    self.log.verbose( 'Submitting generic pilots for TaskQueue %s' % taskQueueDict['TaskQueueID'] )
    #ADRI: Find the generic group
    result = findGenericPilotCredentials( group = taskQueueDict[ 'OwnerGroup' ] )
    if not result[ 'OK' ]:
      self.log.error( ERROR_GENERIC_CREDENTIALS, result[ 'Message' ] )
      return S_ERROR( ERROR_GENERIC_CREDENTIALS )
    ownerDN, ownerGroup = result[ 'Value' ]

    # A proxy token caps how many jobs these generic pilots may run in total.
    result = gProxyManager.requestToken( ownerDN, ownerGroup, max( pilotsToSubmit, self.maxJobsInFillMode ) )
    if not result[ 'OK' ]:
      self.log.error( ERROR_TOKEN, result['Message'] )
      return S_ERROR( ERROR_TOKEN )
    ( token, numberOfUses ) = result[ 'Value' ]
    pilotsToSubmit = min( numberOfUses, pilotsToSubmit )

    pilotOptions.append( '-o /Security/ProxyToken=%s' % token )

    # NOTE(review): Python 2 integer division is relied on here; under
    # Python 3 this would yield a float.
    pilotsToSubmit = max( 1, ( pilotsToSubmit - 1 ) / self.maxJobsInFillMode + 1 )
    maxJobsInFillMode = int( numberOfUses / pilotsToSubmit )
  # Use Filling mode
  pilotOptions.append( '-M %s' % maxJobsInFillMode )

  # Debug
  pilotOptions.append( '-d' )
  # Setup.
  pilotOptions.append( '-S %s' % taskQueueDict['Setup'] )
  # CS Servers
  csServers = gConfig.getServersList()
  if len( csServers ) > 3:
    # Remove the master
    master = gConfigurationData.getMasterServer()
    if master in csServers:
      csServers.remove( master )
  pilotOptions.append( '-C %s' % ",".join( csServers ) )
  # DIRAC Extensions to be used in pilots
  # ubeda: I'm not entirely sure if we can use here the same opsHelper as in line
  # line +352
  pilotExtensionsList = Operations().getValue( "Pilot/Extensions", [] )
  extensionsList = []
  if pilotExtensionsList:
    # The literal string 'None' in the CS disables extensions explicitly.
    if pilotExtensionsList[0] != 'None':
      extensionsList = pilotExtensionsList
  else:
    extensionsList = getCSExtensions()
  if extensionsList:
    pilotOptions.append( '-e %s' % ",".join( extensionsList ) )

  #Get DIRAC version and project, There might be global Setup defaults and per VO/Setup defaults (from configure)
  opsHelper = Operations( group = taskQueueDict['OwnerGroup'], setup = taskQueueDict['Setup'] )
  # Requested version of DIRAC (it can be a list, so we take the fist one)
  version = opsHelper.getValue( cfgPath( 'Pilot', 'Version' ) , [ self.installVersion ] )[0]
  pilotOptions.append( '-r %s' % version )
  # Requested Project to install
  installProject = opsHelper.getValue( cfgPath( 'Pilot', 'Project' ) , self.installProject )
  if installProject:
    pilotOptions.append( '-l %s' % installProject )
  installation = opsHelper.getValue( cfgPath( 'Pilot', 'Installation' ), self.installation )
  if installation:
    pilotOptions.append( "-V %s" % installation )
  # Requested CPU time
  pilotOptions.append( '-T %s' % taskQueueDict['CPUTime'] )

  if self.submitPoolOption not in self.extraPilotOptions:
    pilotOptions.append( self.submitPoolOption )

  if self.extraPilotOptions:
    pilotOptions.extend( self.extraPilotOptions )

  return S_OK( ( pilotOptions, pilotsToSubmit, ownerDN, ownerGroup, submitPrivatePilot, privateTQ ) )
def _submitPilots( self, workDir, taskQueueDict, pilotOptions, pilotsToSubmit,
                   ceMask, submitPrivatePilot, privateTQ, proxy, pilotsPerJob ):
  """
  This method must be implemented on the Backend specific derived class.
  This is problem with the Director, not with the Job so we must return S_OK
  Return S_ERROR if not defined.
  """
  # Base implementation only logs; derived classes do the real submission.
  self.log.error( '_submitPilots method not implemented' )
  return S_OK()
def _listMatch( self, proxy, jdl, taskQueueID, rb ):
  """ This method must be implemented on the Backend specific derived class.
  """
  # Base implementation only logs; returns S_OK so the caller keeps going.
  self.log.error( '_listMatch method not implemented' )
  return S_OK()
def _getChildrenReferences( self, proxy, parentReference, taskQueueID ):
  """ This method must be implemented on the Backend specific derived class.
  """
  # Base implementation only logs; returns S_OK so the caller keeps going.
  self.log.error( '_getChildrenReferences method not implemented' )
  return S_OK()
def submitPilots( self, taskQueueDict, pilotsToSubmit, workDir = None ):
  """
  Submit pilots for the given TaskQueue.

  Resolves CE candidates, builds pilot options, downloads a long-lived
  proxy and delegates the actual submission to the backend-specific
  _submitPilots.  Any unexpected exception is logged and reported as
  zero pilots submitted (not as a job failure).
  """
  try:
    taskQueueID = taskQueueDict['TaskQueueID']

    self.log.verbose( 'Submitting Pilot' )
    # No eligible CE means there is nothing to submit for this TQ.
    ceMask = self._resolveCECandidates( taskQueueDict )
    if not ceMask:
      return S_ERROR( 'No CE available for TaskQueue %d' % int( taskQueueID ) )
    result = self._getPilotOptions( taskQueueDict, pilotsToSubmit )
    if not result['OK']:
      return result
    ( pilotOptions, pilotsPerJob, ownerDN, ownerGroup, submitPrivatePilot, privateTQ ) = result['Value']
    # get a valid proxy, submit with a long proxy to avoid renewal
    ret = self._getPilotProxyFromDIRACGroup( ownerDN, ownerGroup, requiredTimeLeft = 86400 * 5 )
    if not ret['OK']:
      self.log.error( ret['Message'] )
      self.log.error( 'No proxy Available', 'User "%s", Group "%s"' % ( ownerDN, ownerGroup ) )
      return S_ERROR( ERROR_PROXY )
    proxy = ret['Value']
    # Now call a Grid Specific method to handle the final submission of the pilots
    return self._submitPilots( workDir, taskQueueDict, pilotOptions,
                               pilotsToSubmit, ceMask,
                               submitPrivatePilot, privateTQ,
                               proxy, pilotsPerJob )

  except Exception:
    self.log.exception( 'Error in Pilot Submission' )

  # Swallowed exception: report zero pilots submitted rather than failing the job.
  return S_OK( 0 )
def _getPilotProxyFromDIRACGroup( self, ownerDN, ownerGroup, requiredTimeLeft ):
  """
  To be overwritten if a given Pilot does not require a full proxy.

  :param requiredTimeLeft: minimum remaining proxy lifetime, in seconds.
  """
  self.log.info( "Downloading %s@%s proxy" % ( ownerDN, ownerGroup ) )
  return gProxyManager.getPilotProxyFromDIRACGroup( ownerDN, ownerGroup, requiredTimeLeft )
def exceptionCallBack( self, threadedJob, exceptionInfo ):
  """Thread-pool callback: log an exception raised by a submission job."""
  self.log.exception( 'Error in Pilot Submission' )
| Sbalbp/DIRAC | WorkloadManagementSystem/private/PilotDirector.py | Python | gpl-3.0 | 19,651 | [
"DIRAC"
] | d1901bb268824a3459811cec4c126a86ee6eafc52ba14b8368db57ba752e135d |
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
# A collection of optimization algorithms. Version 0.5
# CHANGES
# Added fminbound (July 2001)
# Added brute (Aug. 2002)
# Finished line search satisfying strong Wolfe conditions (Mar. 2004)
# Updated strong Wolfe conditions line search to use
# cubic-interpolation (Mar. 2004)
from __future__ import division, print_function, absolute_import
# Minimization routines
__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
'line_search', 'check_grad', 'OptimizeResult', 'show_options',
'OptimizeWarning']
__docformat__ = "restructuredtext en"
import warnings
import sys
import numpy
from scipy._lib.six import callable
from numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze,
vectorize, asarray, sqrt, Inf, asfarray, isinf)
import numpy as np
from .linesearch import (line_search_wolfe1, line_search_wolfe2,
line_search_wolfe2 as line_search,
LineSearchWarning)
# standard status messages of optimizers
# Keyed by termination reason; solvers copy these into OptimizeResult.message.
_status_message = {'success': 'Optimization terminated successfully.',
                   'maxfev': 'Maximum number of function evaluations has '
                             'been exceeded.',
                   'maxiter': 'Maximum number of iterations has been '
                              'exceeded.',
                   'pr_loss': 'Desired error not necessarily achieved due '
                              'to precision loss.'}
class MemoizeJac(object):
    """Cache the gradient of a combined value-and-gradient function.

    ``fun`` must return a ``(value, gradient)`` pair.  Calling the
    instance returns the value and stashes the gradient together with
    the evaluation point; ``derivative`` returns the stashed gradient
    without re-evaluating ``fun`` when asked at the same point.
    """

    def __init__(self, fun):
        self.fun = fun    # wrapped callable returning (value, gradient)
        self.jac = None   # most recently computed gradient
        self.x = None     # point at which self.jac was computed

    def __call__(self, x, *args):
        # Remember the point and the gradient, hand back the value.
        self.x = numpy.asarray(x).copy()
        out = self.fun(x, *args)
        self.jac = out[1]
        return out[0]

    def derivative(self, x, *args):
        # Recompute only when the cached point does not match x.
        if self.jac is None or not numpy.all(x == self.x):
            self(x, *args)
        return self.jac
class OptimizeResult(dict):
    """Represents the optimization result.

    A ``dict`` subclass whose entries are also reachable as attributes.

    Attributes
    ----------
    x : ndarray
        The solution of the optimization.
    success : bool
        Whether or not the optimizer exited successfully.
    status : int
        Termination status of the optimizer. Its value depends on the
        underlying solver. Refer to `message` for details.
    message : str
        Description of the cause of the termination.
    fun, jac, hess, hess_inv : ndarray
        Values of objective function, Jacobian, Hessian or its inverse
        (if available).  The Hessians may be approximations; see the
        documentation of the function in question.
    nfev, njev, nhev : int
        Number of evaluations of the objective function and of its
        Jacobian and Hessian.
    nit : int
        Number of iterations performed by the optimizer.
    maxcv : float
        The maximum constraint violation.

    Notes
    -----
    Solvers may attach additional attributes beyond those listed; use
    ``keys()`` to discover what is present on a given instance.
    """

    def __getattr__(self, name):
        # Attribute reads fall through to the dict contents.
        if name in self:
            return self[name]
        raise AttributeError(name)

    # Attribute writes/deletes operate directly on the dict entries.
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def __repr__(self):
        if not self.keys():
            return self.__class__.__name__ + "()"
        width = max(len(k) for k in self.keys()) + 1
        rows = [k.rjust(width) + ': ' + repr(v) for k, v in self.items()]
        return '\n'.join(rows)
class OptimizeWarning(UserWarning):
    """Warning issued by optimizers (e.g. for unknown solver options)."""
    pass
def _check_unknown_options(unknown_options):
    """Emit an `OptimizeWarning` naming any unrecognized solver options."""
    if not unknown_options:
        return
    # Stack level 4: this is called from _minimize_*, which is
    # called from another function in Scipy. Level 4 is the first
    # level in user code.
    names = ", ".join(str(k) for k in unknown_options.keys())
    warnings.warn("Unknown solver options: %s" % names, OptimizeWarning, 4)
def is_array_scalar(x):
    """Test whether `x` is either a scalar or an array scalar.
    """
    return 1 == np.size(x)
# Default finite-difference step: sqrt(machine epsilon) balances truncation
# and round-off error for forward differences (~1.49e-08 for float64).
_epsilon = sqrt(numpy.finfo(float).eps)
def vecnorm(x, ord=2):
    """Vector norm of `x`: max |x_i| for ord=Inf, min |x_i| for ord=-Inf,
    otherwise the ordinary p-norm with p=ord (summed along axis 0)."""
    absx = numpy.abs(x)
    if ord == Inf:
        return numpy.amax(absx)
    if ord == -Inf:
        return numpy.amin(absx)
    return numpy.sum(absx**ord, axis=0)**(1.0 / ord)
def rosen(x):
    """
    The Rosenbrock function.

    Computes ``sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)``.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Rosenbrock function is to be computed.

    Returns
    -------
    f : float
        The value of the Rosenbrock function.

    See Also
    --------
    rosen_der, rosen_hess, rosen_hess_prod
    """
    x = asarray(x)
    head = x[:-1]
    tail = x[1:]
    terms = 100.0 * (tail - head**2.0)**2.0 + (1 - head)**2.0
    return numpy.sum(terms, axis=0)
def rosen_der(x):
    """
    The derivative (i.e. gradient) of the Rosenbrock function.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the derivative is to be computed.

    Returns
    -------
    rosen_der : (N,) ndarray
        The gradient of the Rosenbrock function at `x`.

    See Also
    --------
    rosen, rosen_hess, rosen_hess_prod
    """
    x = asarray(x)
    grad = numpy.zeros_like(x)
    mid = x[1:-1]
    left = x[:-2]
    right = x[2:]
    # Interior components couple each point to both neighbours.
    grad[1:-1] = (200 * (mid - left**2) -
                  400 * (right - mid**2) * mid - 2 * (1 - mid))
    # Endpoints only have one neighbour each.
    grad[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
    grad[-1] = 200 * (x[-1] - x[-2]**2)
    return grad
def rosen_hess(x):
    """
    The Hessian matrix of the Rosenbrock function.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Hessian matrix is to be computed.

    Returns
    -------
    rosen_hess : ndarray
        The Hessian matrix of the Rosenbrock function at `x`.

    See Also
    --------
    rosen, rosen_der, rosen_hess_prod
    """
    x = atleast_1d(x)
    # Symmetric tridiagonal matrix: identical sub/super-diagonals.
    off = -400 * x[:-1]
    H = numpy.diag(off, 1) + numpy.diag(off, -1)
    d = numpy.zeros(len(x), dtype=x.dtype)
    d[0] = 1200 * x[0]**2 - 400 * x[1] + 2
    d[-1] = 200
    d[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
    return H + numpy.diag(d)
def rosen_hess_prod(x, p):
    """
    Product of the Hessian matrix of the Rosenbrock function with a vector.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Hessian matrix is to be computed.
    p : array_like
        1-D array, the vector to be multiplied by the Hessian matrix.

    Returns
    -------
    rosen_hess_prod : ndarray
        The Hessian matrix of the Rosenbrock function at `x` multiplied
        by the vector `p`.

    See Also
    --------
    rosen, rosen_der, rosen_hess
    """
    x = atleast_1d(x)
    Hp = numpy.zeros(len(x), dtype=x.dtype)
    # First and last rows of the tridiagonal Hessian, applied directly.
    Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]
    Hp[-1] = 200 * p[-1] - 400 * x[-2] * p[-2]
    # Interior rows combine the sub-, main and super-diagonal terms.
    Hp[1:-1] = (-400 * x[:-2] * p[:-2]
                + (202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1]
                - 400 * x[1:-1] * p[2:])
    return Hp
def wrap_function(function, args):
    """Return ``(ncalls, wrapper)``: a one-element call counter and a
    callable that appends *args* to each invocation of *function*.
    If *function* is None the wrapper is None as well."""
    ncalls = [0]
    if function is None:
        return ncalls, None

    def wrapper(*call_args):
        ncalls[0] += 1
        return function(*(call_args + args))

    return ncalls, wrapper
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
         full_output=0, disp=1, retall=0, callback=None):
    """
    Minimize a function using the downhill simplex algorithm.

    Only function values are used -- no derivatives or second
    derivatives are required.

    Parameters
    ----------
    func : callable func(x,*args)
        The objective function to be minimized.
    x0 : ndarray
        Initial guess.
    args : tuple, optional
        Extra arguments passed to func, i.e. ``f(x,*args)``.
    callback : callable, optional
        Called after each iteration, as callback(xk), where xk is the
        current parameter vector.
    xtol : float, optional
        Relative error in xopt acceptable for convergence.
    ftol : number, optional
        Relative error in func(xopt) acceptable for convergence.
    maxiter : int, optional
        Maximum number of iterations to perform.
    maxfun : number, optional
        Maximum number of function evaluations to make.
    full_output : bool, optional
        Set to True if fopt and warnflag outputs are desired.
    disp : bool, optional
        Set to True to print convergence messages.
    retall : bool, optional
        Set to True to return list of solutions at each iteration.

    Returns
    -------
    xopt : ndarray
        Parameter that minimizes function.
    fopt : float
        Value of function at minimum: ``fopt = func(xopt)``.
        Only returned if `full_output` is True.
    iter : int
        Number of iterations performed.  Only with `full_output`.
    funcalls : int
        Number of function calls made.  Only with `full_output`.
    warnflag : int
        1 : Maximum number of function evaluations made.
        2 : Maximum number of iterations reached.
        Only with `full_output`.
    allvecs : list
        Solution at each iteration.  Only with `retall`.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'Nelder-Mead' `method` in particular.

    Notes
    -----
    Thin wrapper around `_minimize_neldermead`, which implements the
    Nelder--Mead simplex algorithm.  The method is robust in many
    applications but may be slow, is not guaranteed to converge, and
    can behave poorly in high dimensions.

    References
    ----------
    .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
           minimization", The Computer Journal, 7, pp. 308-313

    .. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
           Respectable", in Numerical Analysis 1995, Proceedings of the
           1995 Dundee Biennial Conference in Numerical Analysis, D.F.
           Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,
           Harlow, UK, pp. 191-208.
    """
    options = {'xtol': xtol,
               'ftol': ftol,
               'maxiter': maxiter,
               'maxfev': maxfun,
               'disp': disp,
               'return_all': retall}
    res = _minimize_neldermead(func, x0, args, callback=callback, **options)
    if not full_output:
        # Just the solution (optionally with the iteration history).
        return (res['x'], res['allvecs']) if retall else res['x']
    out = (res['x'], res['fun'], res['nit'], res['nfev'], res['status'])
    if retall:
        out += (res['allvecs'],)
    return out
def _minimize_neldermead(func, x0, args=(), callback=None,
                         xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
                         disp=False, return_all=False,
                         **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    Nelder-Mead algorithm.

    Returns an `OptimizeResult`; ``status`` is 0 on success, 1 when
    `maxfev` is exhausted and 2 when `maxiter` is exhausted.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.
    ftol : float
        Relative error in ``fun(xopt)`` acceptable for convergence.
    maxiter : int
        Maximum number of iterations to perform.
    maxfev : int
        Maximum number of function evaluations to make.
    """
    _check_unknown_options(unknown_options)
    maxfun = maxfev
    retall = return_all

    # Wrap func so every evaluation is counted in fcalls[0].
    fcalls, func = wrap_function(func, args)
    x0 = asfarray(x0).flatten()
    N = len(x0)
    # Default budgets scale with the problem dimension.
    if maxiter is None:
        maxiter = N * 200
    if maxfun is None:
        maxfun = N * 200

    # Standard Nelder-Mead coefficients: reflection (rho), expansion (chi),
    # contraction (psi) and shrink (sigma) factors.
    rho = 1
    chi = 2
    psi = 0.5
    sigma = 0.5
    one2np1 = list(range(1, N + 1))

    # Initial simplex: x0 plus N points, each perturbed along one axis.
    sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
    fsim = numpy.zeros((N + 1,), float)
    sim[0] = x0
    if retall:
        allvecs = [sim[0]]
    fsim[0] = func(x0)
    nonzdelt = 0.05      # relative perturbation for nonzero components
    zdelt = 0.00025      # absolute perturbation for zero components
    for k in range(0, N):
        y = numpy.array(x0, copy=True)
        if y[k] != 0:
            y[k] = (1 + nonzdelt)*y[k]
        else:
            y[k] = zdelt

        sim[k + 1] = y
        f = func(y)
        fsim[k + 1] = f

    ind = numpy.argsort(fsim)
    fsim = numpy.take(fsim, ind, 0)
    # sort so sim[0,:] has the lowest function value
    sim = numpy.take(sim, ind, 0)

    iterations = 1

    while (fcalls[0] < maxfun and iterations < maxiter):
        # Converged when the simplex is small both in x and in f.
        if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xtol and
                numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= ftol):
            break

        # Reflect the worst vertex through the centroid of the others.
        xbar = numpy.add.reduce(sim[:-1], 0) / N
        xr = (1 + rho) * xbar - rho * sim[-1]
        fxr = func(xr)
        doshrink = 0

        if fxr < fsim[0]:
            # Reflection beat the best vertex: try expanding further.
            xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
            fxe = func(xe)

            if fxe < fxr:
                sim[-1] = xe
                fsim[-1] = fxe
            else:
                sim[-1] = xr
                fsim[-1] = fxr
        else:  # fsim[0] <= fxr
            if fxr < fsim[-2]:
                # Reflection is at least better than the second-worst vertex.
                sim[-1] = xr
                fsim[-1] = fxr
            else:  # fxr >= fsim[-2]
                # Perform contraction
                if fxr < fsim[-1]:
                    # Outside contraction, between xbar and xr.
                    xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
                    fxc = func(xc)

                    if fxc <= fxr:
                        sim[-1] = xc
                        fsim[-1] = fxc
                    else:
                        doshrink = 1
                else:
                    # Perform an inside contraction
                    xcc = (1 - psi) * xbar + psi * sim[-1]
                    fxcc = func(xcc)

                    if fxcc < fsim[-1]:
                        sim[-1] = xcc
                        fsim[-1] = fxcc
                    else:
                        doshrink = 1

                if doshrink:
                    # Shrink every vertex toward the current best point.
                    for j in one2np1:
                        sim[j] = sim[0] + sigma * (sim[j] - sim[0])
                        fsim[j] = func(sim[j])

        # Re-sort so sim[0] remains the best vertex.
        ind = numpy.argsort(fsim)
        sim = numpy.take(sim, ind, 0)
        fsim = numpy.take(fsim, ind, 0)
        if callback is not None:
            callback(sim[0])
        iterations += 1
        if retall:
            allvecs.append(sim[0])

    x = sim[0]
    fval = numpy.min(fsim)
    warnflag = 0

    # Classify the termination reason for the result object.
    if fcalls[0] >= maxfun:
        warnflag = 1
        msg = _status_message['maxfev']
        if disp:
            print('Warning: ' + msg)
    elif iterations >= maxiter:
        warnflag = 2
        msg = _status_message['maxiter']
        if disp:
            print('Warning: ' + msg)
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % iterations)
            print(" Function evaluations: %d" % fcalls[0])

    result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],
                            status=warnflag, success=(warnflag == 0),
                            message=msg, x=x)
    if retall:
        result['allvecs'] = allvecs
    return result
def _approx_fprime_helper(xk, f, epsilon, args=(), f0=None):
"""
See ``approx_fprime``. An optional initial function value arg is added.
"""
if f0 is None:
f0 = f(*((xk,) + args))
grad = numpy.zeros((len(xk),), float)
ei = numpy.zeros((len(xk),), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
def approx_fprime(xk, f, epsilon, *args):
    """Finite-difference approximation of the gradient of a scalar function.

    Parameters
    ----------
    xk : array_like
        The coordinate vector at which to determine the gradient of `f`.
    f : callable
        The function of which to determine the gradient (partial derivatives).
        Should take `xk` as first argument, other arguments to `f` can be
        supplied in ``*args``.  Should return a scalar, the value of the
        function at `xk`.
    epsilon : array_like
        Increment to `xk` to use for determining the function gradient.
        If a scalar, uses the same finite difference delta for all partial
        derivatives.  If an array, should contain one value per element of
        `xk`.
    \*args : args, optional
        Any other arguments that are to be passed to `f`.

    Returns
    -------
    grad : ndarray
        The partial derivatives of `f` to `xk`.

    See Also
    --------
    check_grad : Check correctness of gradient function against approx_fprime.

    Notes
    -----
    The function gradient is determined by the forward finite difference
    formula::

                 f(xk[i] + epsilon[i]) - f(xk[i])
        f'[i] = ---------------------------------
                            epsilon[i]

    The main use of `approx_fprime` is in scalar function optimizers like
    `fmin_bfgs`, to determine numerically the Jacobian of a function.

    Examples
    --------
    >>> from scipy import optimize
    >>> def func(x, c0, c1):
    ...     "Coordinate vector `x` should be an array of size two."
    ...     return c0 * x[0]**2 + c1*x[1]**2

    >>> x = np.ones(2)
    >>> c0, c1 = (1, 200)
    >>> eps = np.sqrt(np.finfo(float).eps)
    >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1)
    array([   2.        ,  400.00004198])

    """
    # Delegate to the helper, which also accepts a precomputed f(xk).
    return _approx_fprime_helper(xk, f, epsilon, args=args)
def check_grad(func, grad, x0, *args, **kwargs):
    """Check the correctness of a gradient function by comparing it against a
    (forward) finite-difference approximation of the gradient.

    Parameters
    ----------
    func : callable ``func(x0, *args)``
        Function whose derivative is to be checked.
    grad : callable ``grad(x0, *args)``
        Gradient of `func`.
    x0 : ndarray
        Points to check `grad` against forward difference approximation of grad
        using `func`.
    args : \*args, optional
        Extra arguments passed to `func` and `grad`.
    epsilon : float, optional
        Step size used for the finite difference approximation. It defaults to
        ``sqrt(numpy.finfo(float).eps)``, which is approximately 1.49e-08.

    Returns
    -------
    err : float
        The square root of the sum of squares (i.e. the 2-norm) of the
        difference between ``grad(x0, *args)`` and the finite difference
        approximation of `grad` using func at the points `x0`.

    See Also
    --------
    approx_fprime

    Examples
    --------
    >>> def func(x):
    ...     return x[0]**2 - 0.5 * x[1]**3
    >>> def grad(x):
    ...     return [2 * x[0], -1.5 * x[1]**2]
    >>> check_grad(func, grad, [1.5, -1.5])
    2.9802322387695312e-08

    """
    # `epsilon` is the only accepted keyword; anything else is an error.
    step = kwargs.pop('epsilon', _epsilon)
    if kwargs:
        raise ValueError("Unknown keyword arguments: %r" %
                         (list(kwargs.keys()),))
    return sqrt(sum((grad(x0, *args) -
                     approx_fprime(x0, func, step, *args))**2))
def approx_fhess_p(x0, p, fprime, epsilon, *args):
    """Forward-difference approximation of the Hessian-vector product
    ``H(x0) @ p`` using two evaluations of the gradient `fprime`."""
    grad_shifted = fprime(*((x0 + epsilon * p,) + args))
    grad_here = fprime(*((x0,) + args))
    return (grad_shifted - grad_here) / epsilon
class _LineSearchError(RuntimeError):
    """Raised when neither Wolfe line search finds a suitable step."""
    pass
def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
                         **kwargs):
    """
    Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
    suitable step length is not found, and raise an exception if a
    suitable step length is not found.

    Raises
    ------
    _LineSearchError
        If no suitable step size is found

    """
    ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
                             old_fval, old_old_fval,
                             **kwargs)

    if ret[0] is None:
        # line search failed: try different one.
        # The fallback search may emit LineSearchWarning; suppress it since
        # failure is signalled via the return value / exception instead.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', LineSearchWarning)
            ret = line_search_wolfe2(f, fprime, xk, pk, gfk,
                                     old_fval, old_old_fval)

    if ret[0] is None:
        raise _LineSearchError()

    return ret
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
              epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
              retall=0, callback=None):
    """
    Minimize a function using the BFGS algorithm.

    Parameters
    ----------
    f : callable f(x,*args)
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    fprime : callable f'(x,*args), optional
        Gradient of f; approximated numerically when omitted.
    args : tuple, optional
        Extra arguments passed to f and fprime.
    gtol : float, optional
        Terminate successfully once the gradient norm is below gtol.
    norm : float, optional
        Order of norm (Inf is max, -Inf is min).
    epsilon : int or ndarray, optional
        Step size used when fprime is approximated.
    callback : callable, optional
        Called after each iteration as callback(xk), with xk the
        current parameter vector.
    maxiter : int, optional
        Maximum number of iterations to perform.
    full_output : bool, optional
        If True, also return fopt, func_calls, grad_calls, and warnflag
        in addition to xopt.
    disp : bool, optional
        Print convergence message if True.
    retall : bool, optional
        Return a list of results at each iteration if True.

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. f(xopt) == fopt.
    fopt : float
        Minimum value.
    gopt : ndarray
        Value of gradient at minimum, f'(xopt), which should be near 0.
    Bopt : ndarray
        Value of 1/f''(xopt), i.e. the inverse hessian matrix.
    func_calls : int
        Number of function_calls made.
    grad_calls : int
        Number of gradient calls made.
    warnflag : integer
        1 : Maximum number of iterations exceeded.
        2 : Gradient and/or function calls not changing.
    allvecs : list
        `OptimizeResult` at each iteration. Only returned if retall is True.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'BFGS' `method` in particular.

    Notes
    -----
    Optimize the function, f, whose gradient is given by fprime
    using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
    and Shanno (BFGS).

    References
    ----------
    Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198.
    """
    # Translate the legacy keyword names into the solver's option names.
    options = {'gtol': gtol,
               'norm': norm,
               'eps': epsilon,
               'disp': disp,
               'maxiter': maxiter,
               'return_all': retall}

    res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **options)

    if full_output:
        out = (res['x'], res['fun'], res['jac'], res['hess_inv'],
               res['nfev'], res['njev'], res['status'])
        if retall:
            out = out + (res['allvecs'],)
        return out
    if retall:
        return res['x'], res['allvecs']
    return res['x']
def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None,
                   gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
                   disp=False, return_all=False,
                   **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    BFGS algorithm.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    maxiter : int
        Maximum number of iterations to perform.
    gtol : float
        Gradient norm must be less than `gtol` before successful
        termination.
    norm : float
        Order of norm (Inf is max, -Inf is min).
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.
    """
    _check_unknown_options(unknown_options)
    f = fun
    fprime = jac
    epsilon = eps
    retall = return_all
    x0 = asarray(x0).flatten()
    if x0.ndim == 0:
        x0.shape = (1,)
    if maxiter is None:
        maxiter = len(x0) * 200
    # Wrap objective/gradient so evaluations are counted; fall back to a
    # finite-difference gradient when none was supplied.
    func_calls, f = wrap_function(f, args)
    if fprime is None:
        grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
    else:
        grad_calls, myfprime = wrap_function(fprime, args)
    gfk = myfprime(x0)
    k = 0
    N = len(x0)
    I = numpy.eye(N, dtype=int)
    Hk = I  # initial inverse-Hessian approximation: the identity
    old_fval = f(x0)
    old_old_fval = None
    xk = x0
    if retall:
        allvecs = [x0]
    sk = [2 * gtol]
    warnflag = 0
    gnorm = vecnorm(gfk, ord=norm)
    while (gnorm > gtol) and (k < maxiter):
        pk = -numpy.dot(Hk, gfk)  # quasi-Newton search direction
        try:
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                _line_search_wolfe12(f, myfprime, xk, pk, gfk,
                                     old_fval, old_old_fval)
        except _LineSearchError:
            # Line search failed to find a better solution.
            warnflag = 2
            break
        xkp1 = xk + alpha_k * pk
        if retall:
            allvecs.append(xkp1)
        sk = xkp1 - xk  # step actually taken
        xk = xkp1
        if gfkp1 is None:
            gfkp1 = myfprime(xkp1)
        yk = gfkp1 - gfk  # change in the gradient over the step
        gfk = gfkp1
        if callback is not None:
            callback(xk)
        k += 1
        gnorm = vecnorm(gfk, ord=norm)
        if (gnorm <= gtol):
            break
        if not numpy.isfinite(old_fval):
            # We correctly found +-Inf as optimal value, or something went
            # wrong.
            warnflag = 2
            break
        try:  # this was handled in numeric, let it remain for more safety
            rhok = 1.0 / (numpy.dot(yk, sk))
        except ZeroDivisionError:
            rhok = 1000.0
            if disp:
                print("Divide-by-zero encountered: rhok assumed large")
        if isinf(rhok):  # this is patch for numpy
            rhok = 1000.0
            if disp:
                print("Divide-by-zero encountered: rhok assumed large")
        # BFGS update of the inverse-Hessian approximation:
        # Hk+1 = (I - rho*s*y^T) Hk (I - rho*y*s^T) + rho*s*s^T
        A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok
        A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok
        Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + (rhok * sk[:, numpy.newaxis] *
                                                 sk[numpy.newaxis, :])
    fval = old_fval
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % func_calls[0])
            print(" Gradient evaluations: %d" % grad_calls[0])
    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % func_calls[0])
            print(" Gradient evaluations: %d" % grad_calls[0])
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % func_calls[0])
            print(" Gradient evaluations: %d" % grad_calls[0])
    result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=func_calls[0],
                            njev=grad_calls[0], status=warnflag,
                            success=(warnflag == 0), message=msg, x=xk,
                            nit=k)
    if retall:
        result['allvecs'] = allvecs
    return result
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
            maxiter=None, full_output=0, disp=1, retall=0, callback=None):
    """
    Minimize a function using a nonlinear conjugate gradient algorithm.

    Parameters
    ----------
    f : callable, ``f(x, *args)``
        Objective function to be minimized. Here `x` must be a 1-D array of
        the variables that are to be changed in the search for a minimum, and
        `args` are the other (fixed) parameters of `f`.
    x0 : ndarray
        A user-supplied initial estimate of `xopt`, the optimal value of `x`.
        It must be a 1-D array of values.
    fprime : callable, ``fprime(x, *args)``, optional
        A function that returns the gradient of `f` at `x`. Here `x` and `args`
        are as described above for `f`. The returned value must be a 1-D array.
        Defaults to None, in which case the gradient is approximated
        numerically (see `epsilon`, below).
    args : tuple, optional
        Parameter values passed to `f` and `fprime`. Must be supplied whenever
        additional fixed parameters are needed to completely specify the
        functions `f` and `fprime`.
    gtol : float, optional
        Stop when the norm of the gradient is less than `gtol`.
    norm : float, optional
        Order to use for the norm of the gradient
        (``-np.Inf`` is min, ``np.Inf`` is max).
    epsilon : float or ndarray, optional
        Step size(s) to use when `fprime` is approximated numerically. Can be a
        scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the
        floating point machine precision. Usually ``sqrt(eps)`` is about
        1.5e-8.
    maxiter : int, optional
        Maximum number of iterations to perform. Default is ``200 * len(x0)``.
    full_output : bool, optional
        If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in
        addition to `xopt`. See the Returns section below for additional
        information on optional return values.
    disp : bool, optional
        If True, return a convergence message, followed by `xopt`.
    retall : bool, optional
        If True, add to the returned values the results of each iteration.
    callback : callable, optional
        An optional user-supplied function, called after each iteration.
        Called as ``callback(xk)``, where ``xk`` is the current value of `x0`.

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. ``f(xopt) == fopt``.
    fopt : float, optional
        Minimum value found, f(xopt). Only returned if `full_output` is True.
    func_calls : int, optional
        The number of function_calls made. Only returned if `full_output`
        is True.
    grad_calls : int, optional
        The number of gradient calls made. Only returned if `full_output` is
        True.
    warnflag : int, optional
        Integer value with warning status, only returned if `full_output` is
        True.

        0 : Success.

        1 : The maximum number of iterations was exceeded.

        2 : Gradient and/or function calls were not changing. May indicate
            that precision was lost, i.e., the routine did not converge.

    allvecs : list of ndarray, optional
        List of arrays, containing the results at each iteration.
        Only returned if `retall` is True.

    See Also
    --------
    minimize : common interface to all `scipy.optimize` algorithms for
               unconstrained and constrained minimization of multivariate
               functions. It provides an alternative way to call
               ``fmin_cg``, by specifying ``method='CG'``.

    Notes
    -----
    This conjugate gradient algorithm is based on that of Polak and Ribiere
    [1]_.

    Conjugate gradient methods tend to work better when:

    1. `f` has a unique global minimizing point, and no local minima or
       other stationary points,
    2. `f` is, at least locally, reasonably well approximated by a
       quadratic function of the variables,
    3. `f` is continuous and has a continuous gradient,
    4. `fprime` is not too large, e.g., has a norm less than 1000,
    5. The initial guess, `x0`, is reasonably close to `f` 's global
       minimizing point, `xopt`.

    References
    ----------
    .. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122.

    Examples
    --------
    Example 1: seek the minimum value of the expression
    ``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values
    of the parameters and an initial guess ``(u, v) = (0, 0)``.

    >>> args = (2, 3, 7, 8, 9, 10)  # parameter values
    >>> def f(x, *args):
    ...     u, v = x
    ...     a, b, c, d, e, f = args
    ...     return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f
    >>> def gradf(x, *args):
    ...     u, v = x
    ...     a, b, c, d, e, f = args
    ...     gu = 2*a*u + b*v + d     # u-component of the gradient
    ...     gv = b*u + 2*c*v + e     # v-component of the gradient
    ...     return np.asarray((gu, gv))
    >>> x0 = np.asarray((0, 0))  # Initial guess.
    >>> from scipy import optimize
    >>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args)
    Optimization terminated successfully.
             Current function value: 1.617021
             Iterations: 2
             Function evaluations: 5
             Gradient evaluations: 5
    >>> res1
    array([-1.80851064, -0.25531915])

    Example 2: solve the same problem using the `minimize` function.
    (This `myopts` dictionary shows all of the available options,
    although in practice only non-default values would be needed.
    The returned value will be a dictionary.)

    >>> opts = {'maxiter' : None,    # default value.
    ...         'disp' : True,    # non-default value.
    ...         'gtol' : 1e-5,    # default value.
    ...         'norm' : np.inf,  # default value.
    ...         'eps' : 1.4901161193847656e-08}  # default value.
    >>> res2 = optimize.minimize(f, x0, jac=gradf, args=args,
    ...                          method='CG', options=opts)
    Optimization terminated successfully.
             Current function value: 1.617021
             Iterations: 2
             Function evaluations: 5
             Gradient evaluations: 5
    >>> res2.x  # minimum found
    array([-1.80851064, -0.25531915])

    """
    # Map the legacy keyword names onto the _minimize_cg option names.
    opts = {'gtol': gtol,
            'norm': norm,
            'eps': epsilon,
            'disp': disp,
            'maxiter': maxiter,
            'return_all': retall}

    res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts)

    if full_output:
        retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status']
        if retall:
            retlist += (res['allvecs'], )
        return retlist
    else:
        if retall:
            return res['x'], res['allvecs']
        else:
            return res['x']
def _minimize_cg(fun, x0, args=(), jac=None, callback=None,
                 gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
                 disp=False, return_all=False,
                 **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    conjugate gradient algorithm.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    maxiter : int
        Maximum number of iterations to perform.
    gtol : float
        Gradient norm must be less than `gtol` before successful
        termination.
    norm : float
        Order of norm (Inf is max, -Inf is min).
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.
    """
    _check_unknown_options(unknown_options)
    f = fun
    fprime = jac
    epsilon = eps
    retall = return_all
    x0 = asarray(x0).flatten()
    if maxiter is None:
        maxiter = len(x0) * 200
    # Count evaluations; approximate the gradient by finite differences
    # when no Jacobian was supplied.
    func_calls, f = wrap_function(f, args)
    if fprime is None:
        grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
    else:
        grad_calls, myfprime = wrap_function(fprime, args)
    gfk = myfprime(x0)
    k = 0
    xk = x0
    old_fval = f(xk)
    old_old_fval = None
    if retall:
        allvecs = [xk]
    warnflag = 0
    pk = -gfk  # first direction: steepest descent
    gnorm = vecnorm(gfk, ord=norm)
    while (gnorm > gtol) and (k < maxiter):
        deltak = numpy.dot(gfk, gfk)
        try:
            # c2=0.4 is the curvature constant handed to the line search.
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval,
                                     old_old_fval, c2=0.4)
        except _LineSearchError:
            # Line search failed to find a better solution.
            warnflag = 2
            break
        xk = xk + alpha_k * pk
        if retall:
            allvecs.append(xk)
        if gfkp1 is None:
            gfkp1 = myfprime(xk)
        yk = gfkp1 - gfk
        # Polak-Ribiere coefficient, clipped at zero (restarts with
        # steepest descent when it would go negative).
        beta_k = max(0, numpy.dot(yk, gfkp1) / deltak)
        pk = -gfkp1 + beta_k * pk
        gfk = gfkp1
        gnorm = vecnorm(gfk, ord=norm)
        if callback is not None:
            callback(xk)
        k += 1
    fval = old_fval
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % func_calls[0])
            print(" Gradient evaluations: %d" % grad_calls[0])
    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % func_calls[0])
            print(" Gradient evaluations: %d" % grad_calls[0])
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % func_calls[0])
            print(" Gradient evaluations: %d" % grad_calls[0])
    result = OptimizeResult(fun=fval, jac=gfk, nfev=func_calls[0],
                            njev=grad_calls[0], status=warnflag,
                            success=(warnflag == 0), message=msg, x=xk,
                            nit=k)
    if retall:
        result['allvecs'] = allvecs
    return result
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
             epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
             callback=None):
    """
    Unconstrained minimization of a function using the Newton-CG method.

    Parameters
    ----------
    f : callable ``f(x, *args)``
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    fprime : callable ``f'(x, *args)``
        Gradient of f.
    fhess_p : callable ``fhess_p(x, p, *args)``, optional
        Function which computes the product of the Hessian of f with an
        arbitrary vector, p.
    fhess : callable ``fhess(x, *args)``, optional
        Function to compute the Hessian matrix of f.
    args : tuple, optional
        Extra arguments passed to f, fprime, fhess_p, and fhess
        (the same set of extra arguments is supplied to all of
        these functions).
    epsilon : float or ndarray, optional
        If fhess is approximated, use this value for the step size.
    callback : callable, optional
        An optional user-supplied function which is called after
        each iteration. Called as callback(xk), where xk is the
        current parameter vector.
    avextol : float, optional
        Convergence is assumed when the average relative error in
        the minimizer falls below this amount.
    maxiter : int, optional
        Maximum number of iterations to perform.
    full_output : bool, optional
        If True, return the optional outputs.
    disp : bool, optional
        If True, print convergence message.
    retall : bool, optional
        If True, return a list of results at each iteration.

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. ``f(xopt) == fopt``.
    fopt : float
        Value of the function at xopt, i.e. ``fopt = f(xopt)``.
    fcalls : int
        Number of function calls made.
    gcalls : int
        Number of gradient calls made.
    hcalls : int
        Number of hessian calls made.
    warnflag : int
        Warnings generated by the algorithm.
        1 : Maximum number of iterations exceeded.
    allvecs : list
        The result at each iteration, if retall is True (see below).

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'Newton-CG' `method` in particular.

    Notes
    -----
    Only one of `fhess_p` or `fhess` need to be given. If `fhess`
    is provided, then `fhess_p` will be ignored. If neither `fhess`
    nor `fhess_p` is provided, then the hessian product will be
    approximated using finite differences on `fprime`. `fhess_p`
    must compute the hessian times an arbitrary vector. If it is not
    given, finite-differences on `fprime` are used to compute
    it.

    Newton-CG methods are also called truncated Newton methods. This
    function differs from scipy.optimize.fmin_tnc because

    1. scipy.optimize.fmin_ncg is written purely in python using numpy
        and scipy while scipy.optimize.fmin_tnc calls a C function.
    2. scipy.optimize.fmin_ncg is only for unconstrained minimization
        while scipy.optimize.fmin_tnc is for unconstrained minimization
        or box constrained minimization. (Box constraints give
        lower and upper bounds for each variable separately.)

    References
    ----------
    Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140.
    """
    # Forward the legacy keywords to the solver under its option names.
    options = {'xtol': avextol,
               'eps': epsilon,
               'maxiter': maxiter,
               'disp': disp,
               'return_all': retall}

    res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,
                             callback=callback, **options)

    if full_output:
        out = (res['x'], res['fun'], res['nfev'], res['njev'],
               res['nhev'], res['status'])
        if retall:
            out = out + (res['allvecs'],)
        return out
    if retall:
        return res['x'], res['allvecs']
    return res['x']
def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
                       callback=None, xtol=1e-5, eps=_epsilon, maxiter=None,
                       disp=False, return_all=False,
                       **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    Newton-CG algorithm.

    Note that the `jac` parameter (Jacobian) is required.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    xtol : float
        Average relative error in solution `xopt` acceptable for
        convergence.
    maxiter : int
        Maximum number of iterations to perform.
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.
    """
    _check_unknown_options(unknown_options)
    if jac is None:
        raise ValueError('Jacobian is required for Newton-CG method')
    f = fun
    fprime = jac
    fhess_p = hessp
    fhess = hess
    avextol = xtol
    epsilon = eps
    retall = return_all
    x0 = asarray(x0).flatten()
    fcalls, f = wrap_function(f, args)
    gcalls, fprime = wrap_function(fprime, args)
    hcalls = 0
    if maxiter is None:
        maxiter = len(x0)*200
    xtol = len(x0) * avextol
    update = [2 * xtol]  # guarantees at least one pass through the loop
    xk = x0
    if retall:
        allvecs = [xk]
    k = 0
    old_fval = f(x0)
    old_old_fval = None
    float64eps = numpy.finfo(numpy.float64).eps
    warnflag = 0
    while (numpy.add.reduce(numpy.abs(update)) > xtol) and (k < maxiter):
        # Compute a search direction pk by applying the CG method to
        # del2 f(xk) p = - grad f(xk) starting from 0.
        b = -fprime(xk)
        maggrad = numpy.add.reduce(numpy.abs(b))
        eta = numpy.min([0.5, numpy.sqrt(maggrad)])
        termcond = eta * maggrad  # inner-CG stopping tolerance
        xsupi = zeros(len(x0), dtype=x0.dtype)
        ri = -b
        psupi = -ri
        i = 0
        dri0 = numpy.dot(ri, ri)
        if fhess is not None:  # you want to compute hessian once.
            A = fhess(*(xk,) + args)
            hcalls = hcalls + 1
        # Inner conjugate-gradient loop: approximately solve the Newton
        # system; the Hessian enters only through Hessian-vector products.
        while numpy.add.reduce(numpy.abs(ri)) > termcond:
            if fhess is None:
                if fhess_p is None:
                    # Finite-difference Hessian-vector product on fprime.
                    Ap = approx_fhess_p(xk, psupi, fprime, epsilon)
                else:
                    Ap = fhess_p(xk, psupi, *args)
                    hcalls = hcalls + 1
            else:
                Ap = numpy.dot(A, psupi)
            # check curvature
            Ap = asarray(Ap).squeeze()  # get rid of matrices...
            curv = numpy.dot(psupi, Ap)
            if 0 <= curv <= 3 * float64eps:
                # Essentially zero curvature: stop the inner iteration.
                break
            elif curv < 0:
                # Negative curvature detected.
                if (i > 0):
                    break
                else:
                    # fall back to steepest descent direction
                    xsupi = dri0 / (-curv) * b
                    break
            alphai = dri0 / curv
            xsupi = xsupi + alphai * psupi
            ri = ri + alphai * Ap
            dri1 = numpy.dot(ri, ri)
            betai = dri1 / dri0
            psupi = -ri + betai * psupi
            i = i + 1
            dri0 = dri1  # update numpy.dot(ri,ri) for next time.
        pk = xsupi  # search direction is solution to system.
        gfk = -b  # gradient at xk
        try:
            alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \
                _line_search_wolfe12(f, fprime, xk, pk, gfk,
                                     old_fval, old_old_fval)
        except _LineSearchError:
            # Line search failed to find a better solution.
            warnflag = 2
            break
        update = alphak * pk
        xk = xk + update  # upcast if necessary
        if callback is not None:
            callback(xk)
        if retall:
            allvecs.append(xk)
        k += 1
    fval = old_fval
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % fcalls[0])
            print(" Gradient evaluations: %d" % gcalls[0])
            print(" Hessian evaluations: %d" % hcalls)
    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % fcalls[0])
            print(" Gradient evaluations: %d" % gcalls[0])
            print(" Hessian evaluations: %d" % hcalls)
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % fcalls[0])
            print(" Gradient evaluations: %d" % gcalls[0])
            print(" Hessian evaluations: %d" % hcalls)
    result = OptimizeResult(fun=fval, jac=gfk, nfev=fcalls[0], njev=gcalls[0],
                            nhev=hcalls, status=warnflag,
                            success=(warnflag == 0), message=msg, x=xk,
                            nit=k)
    if retall:
        result['allvecs'] = allvecs
    return result
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
              full_output=0, disp=1):
    """Bounded minimization for scalar functions.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function to be minimized (must accept and return scalars).
    x1, x2 : float or array scalar
        The optimization bounds.
    args : tuple, optional
        Extra arguments passed to function.
    xtol : float, optional
        The convergence tolerance.
    maxfun : int, optional
        Maximum number of function evaluations allowed.
    full_output : bool, optional
        If True, return optional outputs.
    disp : int, optional
        If non-zero, print messages.
            0 : no message printing.
            1 : non-convergence notification messages only.
            2 : print a message on convergence too.
            3 : print iteration results.

    Returns
    -------
    xopt : ndarray
        Parameters (over given interval) which minimize the
        objective function.
    fval : number
        The function value at the minimum point.
    ierr : int
        An error flag (0 if converged, 1 if maximum number of
        function calls reached).
    numfunc : int
        The number of function calls made.

    See also
    --------
    minimize_scalar: Interface to minimization algorithms for scalar
        univariate functions. See the 'Bounded' `method` in particular.

    Notes
    -----
    Finds a local minimizer of the scalar function `func` in the
    interval x1 < xopt < x2 using Brent's method. (See `brent`
    for auto-bracketing).
    """
    # Delegate to the options-based solver, mapping the legacy names.
    res = _minimize_scalar_bounded(func, (x1, x2), args,
                                   xatol=xtol, maxiter=maxfun, disp=disp)
    if full_output:
        return res['x'], res['fun'], res['status'], res['nfev']
    return res['x']
def _minimize_scalar_bounded(func, bounds, args=(),
                             xatol=1e-5, maxiter=500, disp=0,
                             **unknown_options):
    """
    Minimize a scalar function over a bounded interval using Brent's
    method (golden-section search with parabolic interpolation).

    Options
    -------
    maxiter : int
        Maximum number of iterations to perform.
    disp : bool
        Set to True to print convergence messages.
    xatol : float
        Absolute error in solution `xopt` acceptable for convergence.
    """
    _check_unknown_options(unknown_options)
    maxfun = maxiter
    # Test bounds are of correct form
    if len(bounds) != 2:
        raise ValueError('bounds must have two elements.')
    x1, x2 = bounds
    if not (is_array_scalar(x1) and is_array_scalar(x2)):
        raise ValueError("Optimisation bounds must be scalars"
                         " or array scalars.")
    if x1 > x2:
        raise ValueError("The lower bound exceeds the upper bound.")
    flag = 0
    header = ' Func-count x f(x) Procedure'
    step = ' initial'
    sqrt_eps = sqrt(2.2e-16)
    golden_mean = 0.5 * (3.0 - sqrt(5.0))  # golden-section fraction
    a, b = x1, x2
    # xf: best point so far; nfc: second best; fulc: third best.
    fulc = a + golden_mean * (b - a)
    nfc, xf = fulc, fulc
    rat = e = 0.0
    x = xf
    fx = func(x, *args)
    num = 1
    fmin_data = (1, xf, fx)
    ffulc = fnfc = fx
    xm = 0.5 * (a + b)
    tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
    tol2 = 2.0 * tol1
    if disp > 2:
        print (" ")
        print (header)
        print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
    while (numpy.abs(xf - xm) > (tol2 - 0.5 * (b - a))):
        golden = 1
        # Check for parabolic fit
        if numpy.abs(e) > tol1:
            golden = 0
            r = (xf - nfc) * (fx - ffulc)
            q = (xf - fulc) * (fx - fnfc)
            p = (xf - fulc) * q - (xf - nfc) * r
            q = 2.0 * (q - r)
            if q > 0.0:
                p = -p
            q = numpy.abs(q)
            r = e
            e = rat
            # Check for acceptability of parabola
            if ((numpy.abs(p) < numpy.abs(0.5*q*r)) and (p > q*(a - xf)) and
                    (p < q * (b - xf))):
                rat = (p + 0.0) / q
                x = xf + rat
                step = ' parabolic'
                # Keep the trial point at least tol1 away from the bounds.
                if ((x - a) < tol2) or ((b - x) < tol2):
                    si = numpy.sign(xm - xf) + ((xm - xf) == 0)
                    rat = tol1 * si
            else:      # do a golden section step
                golden = 1
        if golden:  # Do a golden-section step
            if xf >= xm:
                e = a - xf
            else:
                e = b - xf
            rat = golden_mean*e
            step = ' golden'
        si = numpy.sign(rat) + (rat == 0)
        x = xf + si * numpy.max([numpy.abs(rat), tol1])
        fu = func(x, *args)
        num += 1
        fmin_data = (num, x, fu)
        if disp > 2:
            print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
        if fu <= fx:
            # New best point: narrow the interval on the old best's side.
            if x >= xf:
                a = xf
            else:
                b = xf
            fulc, ffulc = nfc, fnfc
            nfc, fnfc = xf, fx
            xf, fx = x, fu
        else:
            # Worse than the current best: narrow on the trial's side and
            # update the second/third-best bookkeeping.
            if x < xf:
                a = x
            else:
                b = x
            if (fu <= fnfc) or (nfc == xf):
                fulc, ffulc = nfc, fnfc
                nfc, fnfc = x, fu
            elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
                fulc, ffulc = x, fu
        xm = 0.5 * (a + b)
        tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
        tol2 = 2.0 * tol1
        if num >= maxfun:
            flag = 1
            break
    fval = fx
    if disp > 0:
        _endprint(x, flag, fval, maxfun, xatol, disp)
    result = OptimizeResult(fun=fval, status=flag, success=(flag == 0),
                            message={0: 'Solution found.',
                                     1: 'Maximum number of function calls '
                                        'reached.'}.get(flag, ''),
                            x=xf, nfev=num)
    return result
class Brent:
    """Helper object implementing Brent's method for scalar minimization:
    golden-section search combined with inverse parabolic interpolation.
    """
    #need to rethink design of __init__
    def __init__(self, func, args=(), tol=1.48e-8, maxiter=500,
                 full_output=0):
        self.func = func        # objective, called as func(x, *args)
        self.args = args
        self.tol = tol          # relative x tolerance
        self.maxiter = maxiter
        self._mintol = 1.0e-11  # absolute floor added to the tolerance
        self._cg = 0.3819660    # golden-section fraction, 0.5*(3 - sqrt(5))
        self.xmin = None
        self.fval = None
        self.iter = 0
        self.funcalls = 0

    # need to rethink design of set_bracket (new options, etc)
    def set_bracket(self, brack=None):
        self.brack = brack

    def get_bracket_info(self):
        """Return (xa, xb, xc, fa, fb, fc, funcalls) for a valid bracket,
        auto-bracketing via `bracket` when none (or only two points) given.
        """
        #set up
        func = self.func
        args = self.args
        brack = self.brack
        ### BEGIN core bracket_info code ###
        ### carefully DOCUMENT any CHANGES in core ##
        if brack is None:
            xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
        elif len(brack) == 2:
            xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
                                                       xb=brack[1], args=args)
        elif len(brack) == 3:
            xa, xb, xc = brack
            if (xa > xc):  # swap so xa < xc can be assumed
                xc, xa = xa, xc
            if not ((xa < xb) and (xb < xc)):
                raise ValueError("Not a bracketing interval.")
            fa = func(*((xa,) + args))
            fb = func(*((xb,) + args))
            fc = func(*((xc,) + args))
            if not ((fb < fa) and (fb < fc)):
                raise ValueError("Not a bracketing interval.")
            funcalls = 3
        else:
            raise ValueError("Bracketing interval must be "
                             "length 2 or 3 sequence.")
        ### END core bracket_info code ###
        return xa, xb, xc, fa, fb, fc, funcalls

    def optimize(self):
        """Run the Brent iteration; results are stored on the instance
        (xmin, fval, iter, funcalls), not returned.
        """
        # set up for optimization
        func = self.func
        xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info()
        _mintol = self._mintol
        _cg = self._cg
        #################################
        #BEGIN CORE ALGORITHM
        #################################
        # x: best point so far; w: second best; v: previous value of w.
        x = w = v = xb
        fw = fv = fx = func(*((x,) + self.args))
        if (xa < xc):
            a = xa
            b = xc
        else:
            a = xc
            b = xa
        deltax = 0.0
        funcalls = 1
        iter = 0
        while (iter < self.maxiter):
            tol1 = self.tol * numpy.abs(x) + _mintol
            tol2 = 2.0 * tol1
            xmid = 0.5 * (a + b)
            # check for convergence
            if numpy.abs(x - xmid) < (tol2 - 0.5 * (b - a)):
                break
            # XXX In the first iteration, rat is only bound in the true case
            # of this conditional. This used to cause an UnboundLocalError
            # (gh-4140). It should be set before the if (but to what?).
            if (numpy.abs(deltax) <= tol1):
                if (x >= xmid):
                    deltax = a - x  # do a golden section step
                else:
                    deltax = b - x
                rat = _cg * deltax
            else:  # do a parabolic step
                tmp1 = (x - w) * (fx - fv)
                tmp2 = (x - v) * (fx - fw)
                p = (x - v) * tmp2 - (x - w) * tmp1
                tmp2 = 2.0 * (tmp2 - tmp1)
                if (tmp2 > 0.0):
                    p = -p
                tmp2 = numpy.abs(tmp2)
                dx_temp = deltax
                deltax = rat
                # check parabolic fit
                if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and
                        (numpy.abs(p) < numpy.abs(0.5 * tmp2 * dx_temp))):
                    rat = p * 1.0 / tmp2  # if parabolic step is useful.
                    u = x + rat
                    # Keep the step at least tol1 away from the bounds.
                    if ((u - a) < tol2 or (b - u) < tol2):
                        if xmid - x >= 0:
                            rat = tol1
                        else:
                            rat = -tol1
                else:
                    if (x >= xmid):
                        deltax = a - x  # if it's not do a golden section step
                    else:
                        deltax = b - x
                    rat = _cg * deltax
            if (numpy.abs(rat) < tol1):  # update by at least tol1
                if rat >= 0:
                    u = x + tol1
                else:
                    u = x - tol1
            else:
                u = x + rat
            fu = func(*((u,) + self.args))  # calculate new output value
            funcalls += 1
            if (fu > fx):  # if it's bigger than current
                # Trial is worse: shrink the interval on the trial's side
                # and update the second/third-best bookkeeping.
                if (u < x):
                    a = u
                else:
                    b = u
                if (fu <= fw) or (w == x):
                    v = w
                    w = u
                    fv = fw
                    fw = fu
                elif (fu <= fv) or (v == x) or (v == w):
                    v = u
                    fv = fu
            else:
                # Trial is the new best point.
                if (u >= x):
                    a = x
                else:
                    b = x
                v = w
                w = x
                x = u
                fv = fw
                fw = fx
                fx = fu
            iter += 1
        #################################
        #END CORE ALGORITHM
        #################################
        self.xmin = x
        self.fval = fx
        self.iter = iter
        self.funcalls = funcalls

    def get_result(self, full_output=False):
        # Return xmin only, or the full (xmin, fval, iter, funcalls) tuple.
        if full_output:
            return self.xmin, self.fval, self.iter, self.funcalls
        else:
            return self.xmin
def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500):
    """
    Given a function of one-variable and a possible bracketing interval,
    return the minimum of the function isolated to a fractional precision of
    tol.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function.
    args : tuple, optional
        Additional arguments (if present).
    brack : tuple, optional
        Triple (a,b,c) where (a<b<c) and func(b) <
        func(a),func(c). If bracket consists of two numbers (a,c)
        then they are assumed to be a starting interval for a
        downhill bracket search (see `bracket`); it doesn't always
        mean that the obtained solution will satisfy a<=x<=c.
    tol : float, optional
        Stop if between iteration change is less than `tol`.
    full_output : bool, optional
        If True, return all output args (xmin, fval, iter,
        funcalls).
    maxiter : int, optional
        Maximum number of iterations in solution.

    Returns
    -------
    xmin : ndarray
        Optimum point.
    fval : float
        Optimum value.
    iter : int
        Number of iterations.
    funcalls : int
        Number of objective function evaluations made.

    See also
    --------
    minimize_scalar: Interface to minimization algorithms for scalar
        univariate functions. See the 'Brent' `method` in particular.

    Notes
    -----
    Uses inverse parabolic interpolation when possible to speed up
    convergence of golden section method.
    """
    # Delegate to the options-based scalar solver.
    res = _minimize_scalar_brent(func, brack, args, xtol=tol, maxiter=maxiter)
    if full_output:
        return res['x'], res['fun'], res['nit'], res['nfev']
    return res['x']
def _minimize_scalar_brent(func, brack=None, args=(),
                           xtol=1.48e-8, maxiter=500,
                           **unknown_options):
    """
    Options
    -------
    maxiter : int
        Maximum number of iterations to perform.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.

    Notes
    -----
    Uses inverse parabolic interpolation when possible to speed up
    convergence of golden section method.
    """
    _check_unknown_options(unknown_options)
    tol = xtol
    if tol < 0:
        raise ValueError('tolerance should be >= 0, got %r' % tol)

    # Drive the Brent helper object and repackage its result.
    optimizer = Brent(func=func, args=args, tol=tol,
                      full_output=True, maxiter=maxiter)
    optimizer.set_bracket(brack)
    optimizer.optimize()
    x, fval, nit, nfev = optimizer.get_result(full_output=True)
    return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev)
def golden(func, args=(), brack=None, tol=_epsilon, full_output=0):
    """
    Return the minimum of a function of one variable.

    Given a function of one variable and a possible bracketing interval,
    return the minimum of the function isolated to a fractional precision of
    tol.

    Parameters
    ----------
    func : callable func(x,*args)
        Objective function to minimize.
    args : tuple, optional
        Additional arguments (if present), passed to func.
    brack : tuple, optional
        Triple (a,b,c), where (a<b<c) and func(b) <
        func(a),func(c). If bracket consists of two numbers (a,
        c), then they are assumed to be a starting interval for a
        downhill bracket search (see `bracket`); it doesn't always
        mean that obtained solution will satisfy a<=x<=c.
    tol : float, optional
        x tolerance stop criterion
    full_output : bool, optional
        If True, return optional outputs.

    See also
    --------
    minimize_scalar: Interface to minimization algorithms for scalar
        univariate functions. See the 'Golden' `method` in particular.

    Notes
    -----
    Uses analog of bisection method to decrease the bracketed
    interval.
    """
    # Delegate to the options-based golden-section solver.
    res = _minimize_scalar_golden(func, brack, args, xtol=tol)
    if full_output:
        return res['x'], res['fun'], res['nfev']
    return res['x']
def _minimize_scalar_golden(func, brack=None, args=(),
                            xtol=_epsilon, maxiter=5000,
                            **unknown_options):
    """
    Minimization of a scalar function of one variable using the
    golden section method.

    Options
    -------
    maxiter : int
        Maximum number of iterations to perform.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.
    """
    # Fix: `maxiter` was documented above but not accepted by the signature,
    # so it landed in `unknown_options` and the while-loop below had no
    # iteration cap at all -- it could spin forever when
    # tol * (|x1| + |x2|) underflows (e.g. for minima at x == 0).
    _check_unknown_options(unknown_options)
    tol = xtol
    # Obtain a bracketing triple (xa, xb, xc) with fb < fa and fb < fc.
    if brack is None:
        xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
    elif len(brack) == 2:
        xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
                                                   xb=brack[1], args=args)
    elif len(brack) == 3:
        xa, xb, xc = brack
        if (xa > xc):  # swap so xa < xc can be assumed
            xc, xa = xa, xc
        if not ((xa < xb) and (xb < xc)):
            raise ValueError("Not a bracketing interval.")
        fa = func(*((xa,) + args))
        fb = func(*((xb,) + args))
        fc = func(*((xc,) + args))
        if not ((fb < fa) and (fb < fc)):
            raise ValueError("Not a bracketing interval.")
        funcalls = 3
    else:
        raise ValueError("Bracketing interval must be length 2 or 3 sequence.")
    _gR = 0.61803399        # golden ratio conjugate: (sqrt(5) - 1) / 2
    _gC = 1.0 - _gR
    x3 = xc
    x0 = xa
    # Place the two interior probe points inside the larger sub-interval.
    if (numpy.abs(xc - xb) > numpy.abs(xb - xa)):
        x1 = xb
        x2 = xb + _gC * (xc - xb)
    else:
        x2 = xb
        x1 = xb - _gC * (xb - xa)
    f1 = func(*((x1,) + args))
    f2 = func(*((x2,) + args))
    funcalls += 2
    nit = 0
    # Shrink the bracket until the relative width drops below tol, but
    # never perform more than `maxiter` iterations.
    for i in range(maxiter):
        if numpy.abs(x3 - x0) <= tol * (numpy.abs(x1) + numpy.abs(x2)):
            break
        if (f2 < f1):
            # Minimum is in [x1, x3]; probe further toward x3.
            x0 = x1
            x1 = x2
            x2 = _gR * x1 + _gC * x3
            f1 = f2
            f2 = func(*((x2,) + args))
        else:
            # Minimum is in [x0, x2]; probe further toward x0.
            x3 = x2
            x2 = x1
            x1 = _gR * x2 + _gC * x0
            f2 = f1
            f1 = func(*((x1,) + args))
        funcalls += 1
        nit += 1
    if (f1 < f2):
        xmin = x1
        fval = f1
    else:
        xmin = x2
        fval = f2
    return OptimizeResult(fun=fval, nfev=funcalls, x=xmin, nit=nit)
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
    """
    Bracket the minimum of the function.

    Given a function and distinct initial points, search in the
    downhill direction (as defined by the initital points) and return
    new points xa, xb, xc that bracket the minimum of the function
    f(xa) > f(xb) < f(xc). It doesn't always mean that obtained
    solution will satisfy xa<=x<=xb

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function to minimize.
    xa, xb : float, optional
        Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0.
    args : tuple, optional
        Additional arguments (if present), passed to `func`.
    grow_limit : float, optional
        Maximum grow limit.  Defaults to 110.0
    maxiter : int, optional
        Maximum number of iterations to perform. Defaults to 1000.

    Returns
    -------
    xa, xb, xc : float
        Bracket.
    fa, fb, fc : float
        Objective function values in bracket.
    funcalls : int
        Number of function evaluations made.
    """
    _gold = 1.618034          # golden ratio: (1 + sqrt(5)) / 2
    _verysmall_num = 1e-21
    fa = func(*(xa,) + args)
    fb = func(*(xb,) + args)
    if (fa < fb):  # ensure the search runs downhill, i.e. fa > fb
        xa, xb = xb, xa
        fa, fb = fb, fa
    # First golden-ratio step beyond xb.
    xc = xb + _gold * (xb - xa)
    fc = func(*((xc,) + args))
    funcalls = 3
    niter = 0
    while (fc < fb):
        # Candidate minimum `u` of the parabola through (xa,fa),(xb,fb),(xc,fc).
        t1 = (xb - xa) * (fb - fc)
        t2 = (xb - xc) * (fb - fa)
        diff = t2 - t1
        if numpy.abs(diff) < _verysmall_num:
            denom = 2.0 * _verysmall_num  # guard against division by ~zero
        else:
            denom = 2.0 * diff
        u = xb - ((xb - xc) * t2 - (xb - xa) * t1) / denom
        ulim = xb + grow_limit * (xc - xb)
        if niter > maxiter:
            raise RuntimeError("Too many iterations.")
        niter += 1
        if (u - xc) * (xb - u) > 0.0:
            # Parabolic candidate lies between xb and xc.
            fu = func(*((u,) + args))
            funcalls += 1
            if (fu < fc):
                # Minimum bracketed by (xb, u, xc).
                xa, xb = xb, u
                fa, fb = fb, fu
                return xa, xb, xc, fa, fb, fc, funcalls
            elif (fu > fb):
                # Minimum bracketed by (xa, xb, u).
                xc, fc = u, fu
                return xa, xb, xc, fa, fb, fc, funcalls
            # Parabolic step was useless; fall back to a golden-ratio step.
            u = xc + _gold * (xc - xb)
            fu = func(*((u,) + args))
            funcalls += 1
        elif (u - ulim) * (ulim - xc) >= 0.0:
            # Candidate overshoots the allowed growth; clamp to ulim.
            u = ulim
            fu = func(*((u,) + args))
            funcalls += 1
        elif (u - ulim) * (xc - u) > 0.0:
            # Candidate lies between xc and the growth limit.
            fu = func(*((u,) + args))
            funcalls += 1
            if (fu < fc):
                # Still going downhill; shift forward and take another step.
                xb, xc = xc, u
                u = xc + _gold * (xc - xb)
                fb, fc = fc, fu
                fu = func(*((u,) + args))
                funcalls += 1
        else:
            # Reject the parabolic step entirely; default golden-ratio step.
            u = xc + _gold * (xc - xb)
            fu = func(*((u,) + args))
            funcalls += 1
        # Slide the triple forward and keep searching downhill.
        xa, xb, xc = xb, xc, u
        fa, fb, fc = fb, fc, fu
    return xa, xb, xc, fa, fb, fc, funcalls
def _linesearch_powell(func, p, xi, tol=1e-3):
    """Line-search algorithm using fminbound.

    Find the minimum of the function ``func(x0 + alpha*direc)``.
    """
    def _along_direction(alpha):
        # Objective restricted to the line p + alpha * xi.
        return func(p + alpha * xi)

    alpha_min, fret, _, _ = brent(_along_direction, full_output=1, tol=tol)
    displacement = alpha_min * xi
    return squeeze(fret), p + displacement, displacement
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
                maxfun=None, full_output=0, disp=1, retall=0, callback=None,
                direc=None):
    """
    Minimize a function using modified Powell's method. This method
    only uses function values, not derivatives.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    args : tuple, optional
        Extra arguments passed to func.
    callback : callable, optional
        An optional user-supplied function, called after each
        iteration.  Called as ``callback(xk)``, where ``xk`` is the
        current parameter vector.
    direc : ndarray, optional
        Initial direction set.
    xtol : float, optional
        Line-search error tolerance.
    ftol : float, optional
        Relative error in ``func(xopt)`` acceptable for convergence.
    maxiter : int, optional
        Maximum number of iterations to perform.
    maxfun : int, optional
        Maximum number of function evaluations to make.
    full_output : bool, optional
        If True, fopt, xi, direc, iter, funcalls, and
        warnflag are returned.
    disp : bool, optional
        If True, print convergence messages.
    retall : bool, optional
        If True, return a list of the solution at each iteration.

    Returns
    -------
    xopt : ndarray
        Parameter which minimizes `func`.
    fopt : number
        Value of function at minimum: ``fopt = func(xopt)``.
    direc : ndarray
        Current direction set.
    iter : int
        Number of iterations.
    funcalls : int
        Number of function calls made.
    warnflag : int
        Integer warning flag:
            1 : Maximum number of function evaluations.
            2 : Maximum number of iterations.
    allvecs : list
        List of solutions at each iteration.

    See also
    --------
    minimize: Interface to unconstrained minimization algorithms for
        multivariate functions. See the 'Powell' `method` in particular.

    Notes
    -----
    Uses a modification of Powell's method to find the minimum of
    a function of N variables. Powell's method is a conjugate
    direction method.

    The algorithm has two loops. The outer loop
    merely iterates over the inner loop. The inner loop minimizes
    over each current direction in the direction set. At the end
    of the inner loop, if certain conditions are met, the direction
    that gave the largest decrease is dropped and replaced with
    the difference between the current estimated x and the estimated
    x from the beginning of the inner-loop.

    The technical conditions for replacing the direction of greatest
    increase amount to checking that

    1. No further gain can be made along the direction of greatest increase
       from that iteration.
    2. The direction of greatest increase accounted for a large sufficient
       fraction of the decrease in the function value from that iteration of
       the inner loop.

    References
    ----------
    Powell M.J.D. (1964) An efficient method for finding the minimum of a
    function of several variables without calculating derivatives,
    Computer Journal, 7 (2):155-162.

    Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.:
    Numerical Recipes (any edition), Cambridge University Press
    """
    # Translate the legacy keyword names into the options dict understood
    # by the `_minimize_powell` backend (note maxfun -> 'maxfev' and
    # retall -> 'return_all').
    opts = {'xtol': xtol,
            'ftol': ftol,
            'maxiter': maxiter,
            'maxfev': maxfun,
            'disp': disp,
            'direc': direc,
            'return_all': retall}

    res = _minimize_powell(func, x0, args, callback=callback, **opts)

    # Reassemble the tuple-style return value expected by this legacy API.
    if full_output:
        retlist = (res['x'], res['fun'], res['direc'], res['nit'],
                   res['nfev'], res['status'])
        if retall:
            retlist += (res['allvecs'], )
        return retlist
    else:
        if retall:
            return res['x'], res['allvecs']
        else:
            return res['x']
def _minimize_powell(func, x0, args=(), callback=None,
                     xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
                     disp=False, direc=None, return_all=False,
                     **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    modified Powell algorithm.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.
    ftol : float
        Relative error in ``fun(xopt)`` acceptable for convergence.
    maxiter : int
        Maximum number of iterations to perform.
    maxfev : int
        Maximum number of function evaluations to make.
    direc : ndarray
        Initial set of direction vectors for the Powell method.
    """
    _check_unknown_options(unknown_options)
    maxfun = maxfev
    retall = return_all
    # we need to use a mutable object here that we can update in the
    # wrapper function
    fcalls, func = wrap_function(func, args)
    x = asarray(x0).flatten()
    if retall:
        allvecs = [x]
    N = len(x)
    # Default iteration/evaluation budgets scale with dimensionality.
    if maxiter is None:
        maxiter = N * 1000
    if maxfun is None:
        maxfun = N * 1000
    # Start from the coordinate axes unless a direction set was supplied.
    if direc is None:
        direc = eye(N, dtype=float)
    else:
        direc = asarray(direc, dtype=float)
    fval = squeeze(func(x))
    x1 = x.copy()  # iterate at the start of the current outer iteration
    iter = 0
    ilist = list(range(N))
    while True:
        fx = fval           # function value before this outer iteration
        bigind = 0          # index of the direction with the largest decrease
        delta = 0.0         # magnitude of that largest decrease
        # Inner loop: line-minimize along each direction in the current set.
        for i in ilist:
            direc1 = direc[i]
            fx2 = fval
            fval, x, direc1 = _linesearch_powell(func, x, direc1,
                                                 tol=xtol * 100)
            if (fx2 - fval) > delta:
                delta = fx2 - fval
                bigind = i
        iter += 1
        if callback is not None:
            callback(x)
        if retall:
            allvecs.append(x)
        # Relative-decrease convergence test; 1e-20 guards against fx == 0.
        bnd = ftol * (numpy.abs(fx) + numpy.abs(fval)) + 1e-20
        if 2.0 * (fx - fval) <= bnd:
            break
        if fcalls[0] >= maxfun:
            break
        if iter >= maxiter:
            break
        # Construct the extrapolated point
        direc1 = x - x1     # net displacement over this outer iteration
        x2 = 2*x - x1       # extrapolation of x along that displacement
        x1 = x.copy()
        fx2 = squeeze(func(x2))
        if (fx > fx2):
            # Criterion from Numerical Recipes: replace the direction of
            # largest decrease only when a further move along the net
            # displacement direction is still expected to be profitable.
            t = 2.0*(fx + fx2 - 2.0*fval)
            temp = (fx - fval - delta)
            t *= temp*temp
            temp = fx - fx2
            t -= delta*temp*temp
            if t < 0.0:
                fval, x, direc1 = _linesearch_powell(func, x, direc1,
                                                     tol=xtol*100)
                # Drop the direction of largest decrease, keep the new
                # displacement direction at the end of the set.
                direc[bigind] = direc[-1]
                direc[-1] = direc1
    # Determine why the loop terminated and report accordingly.
    warnflag = 0
    if fcalls[0] >= maxfun:
        warnflag = 1
        msg = _status_message['maxfev']
        if disp:
            print("Warning: " + msg)
    elif iter >= maxiter:
        warnflag = 2
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % iter)
            print(" Function evaluations: %d" % fcalls[0])
    x = squeeze(x)
    result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],
                            status=warnflag, success=(warnflag == 0),
                            message=msg, x=x)
    if retall:
        result['allvecs'] = allvecs
    return result
def _endprint(x, flag, fval, maxfun, xtol, disp):
if flag == 0:
if disp > 1:
print("\nOptimization terminated successfully;\n"
"The returned value satisfies the termination criteria\n"
"(using xtol = ", xtol, ")")
if flag == 1:
if disp:
print("\nMaximum number of function evaluations exceeded --- "
"increase maxfun argument.\n")
return
def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin,
          disp=False):
    """Minimize a function over a given range by brute force.

    Uses the "brute force" method, i.e. computes the function's value
    at each point of a multidimensional grid of points, to find the global
    minimum of the function.

    The function is evaluated everywhere in the range with the datatype of the
    first call to the function, as enforced by the ``vectorize`` NumPy
    function.  The value and type of the function evaluation returned when
    ``full_output=True`` are affected in addition by the ``finish`` argument
    (see Notes).

    Parameters
    ----------
    func : callable
        The objective function to be minimized. Must be in the
        form ``f(x, *args)``, where ``x`` is the argument in
        the form of a 1-D array and ``args`` is a tuple of any
        additional fixed parameters needed to completely specify
        the function.
    ranges : tuple
        Each component of the `ranges` tuple must be either a
        "slice object" or a range tuple of the form ``(low, high)``.
        The program uses these to create the grid of points on which
        the objective function will be computed. See `Note 2` for
        more detail.
    args : tuple, optional
        Any additional fixed parameters needed to completely specify
        the function.
    Ns : int, optional
        Number of grid points along the axes, if not otherwise
        specified. See `Note2`.
    full_output : bool, optional
        If True, return the evaluation grid and the objective function's
        values on it.
    finish : callable, optional
        An optimization function that is called with the result of brute force
        minimization as initial guess.  `finish` should take the initial guess
        as positional argument, and take `args`, `full_output` and `disp`
        as keyword arguments.  Use None if no "polishing" function is to be
        used. See Notes for more details.
    disp : bool, optional
        Set to True to print convergence messages.

    Returns
    -------
    x0 : ndarray
        A 1-D array containing the coordinates of a point at which the
        objective function had its minimum value. (See `Note 1` for
        which point is returned.)
    fval : float
        Function value at the point `x0`. (Returned when `full_output` is
        True.)
    grid : tuple
        Representation of the evaluation grid.  It has the same
        length as `x0`. (Returned when `full_output` is True.)
    Jout : ndarray
        Function values at each point of the evaluation
        grid, `i.e.`, ``Jout = func(*grid)``. (Returned
        when `full_output` is True.)

    See Also
    --------
    basinhopping, differential_evolution

    Notes
    -----
    *Note 1*: The program finds the gridpoint at which the lowest value
    of the objective function occurs.  If `finish` is None, that is the
    point returned.  When the global minimum occurs within (or not very far
    outside) the grid's boundaries, and the grid is fine enough, that
    point will be in the neighborhood of the gobal minimum.

    However, users often employ some other optimization program to
    "polish" the gridpoint values, `i.e.`, to seek a more precise
    (local) minimum near `brute's` best gridpoint.
    The `brute` function's `finish` option provides a convenient way to do
    that.  Any polishing program used must take `brute's` output as its
    initial guess as a positional argument, and take `brute's` input values
    for `args` and `full_output` as keyword arguments, otherwise an error
    will be raised.

    `brute` assumes that the `finish` function returns a tuple in the form:
    ``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing value
    of the argument, ``Jmin`` is the minimum value of the objective function,
    "..." may be some other returned values (which are not used by `brute`),
    and ``statuscode`` is the status code of the `finish` program.

    Note that when `finish` is not None, the values returned are those
    of the `finish` program, *not* the gridpoint ones.  Consequently,
    while `brute` confines its search to the input grid points,
    the `finish` program's results usually will not coincide with any
    gridpoint, and may fall outside the grid's boundary.

    *Note 2*: The grid of points is a `numpy.mgrid` object.
    For `brute` the `ranges` and `Ns` inputs have the following effect.
    Each component of the `ranges` tuple can be either a slice object or a
    two-tuple giving a range of values, such as (0, 5).  If the component is a
    slice object, `brute` uses it directly.  If the component is a two-tuple
    range, `brute` internally converts it to a slice object that interpolates
    `Ns` points from its low-value to its high-value, inclusive.

    Examples
    --------
    We illustrate the use of `brute` to seek the global minimum of a function
    of two variables that is given as the sum of a positive-definite
    quadratic and two deep "Gaussian-shaped" craters.  Specifically, define
    the objective function `f` as the sum of three other functions,
    ``f = f1 + f2 + f3``.  We suppose each of these has a signature
    ``(z, *params)``, where ``z = (x, y)``,  and ``params`` and the functions
    are as defined below.

    >>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
    >>> def f1(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)

    >>> def f2(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))

    >>> def f3(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))

    >>> def f(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return f1(z, *params) + f2(z, *params) + f3(z, *params)

    Thus, the objective function may have local minima near the minimum
    of each of the three functions of which it is composed.  To
    use `fmin` to polish its gridpoint result, we may then continue as
    follows:

    >>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
    >>> from scipy import optimize
    >>> resbrute = optimize.brute(f, rranges, args=params, full_output=True,
    ...                           finish=optimize.fmin)
    >>> resbrute[0]  # global minimum
    array([-1.05665192, 1.80834843])
    >>> resbrute[1]  # function value at global minimum
    -3.4085818767

    Note that if `finish` had been set to None, we would have gotten the
    gridpoint [-1.0 1.75] where the rounded function value is -2.892.
    """
    N = len(ranges)
    if N > 40:
        raise ValueError("Brute Force not possible with more "
                         "than 40 variables.")
    lrange = list(ranges)
    for k in range(N):
        if type(lrange[k]) is not type(slice(None)):
            # Convert a (low, high) tuple into a slice with Ns grid points;
            # a complex step makes numpy.mgrid interpolate inclusively.
            if len(lrange[k]) < 3:
                lrange[k] = tuple(lrange[k]) + (complex(Ns),)
            lrange[k] = slice(*lrange[k])
    if (N == 1):
        lrange = lrange[0]

    def _scalarfunc(*params):
        # Adapter: mgrid supplies one scalar per axis, while `func`
        # expects a single 1-D array as its first argument.
        params = squeeze(asarray(params))
        return func(params, *args)

    vecfunc = vectorize(_scalarfunc)
    grid = mgrid[lrange]
    if (N == 1):
        grid = (grid,)  # normalize so grid is always a tuple of arrays
    Jout = vecfunc(*grid)
    Nshape = shape(Jout)
    # Unravel the flat argmin index into per-axis indices (row-major order).
    indx = argmin(Jout.ravel(), axis=-1)
    Nindx = zeros(N, int)
    xmin = zeros(N, float)
    for k in range(N - 1, -1, -1):
        thisN = Nshape[k]
        Nindx[k] = indx % Nshape[k]
        indx = indx // thisN
    for k in range(N):
        xmin[k] = grid[k][tuple(Nindx)]
    Jmin = Jout[tuple(Nindx)]
    if (N == 1):
        grid = grid[0]
        xmin = xmin[0]
    if callable(finish):
        # Polish the best grid point with the supplied local optimizer;
        # its last return value is assumed to be a status code (>0 = failure).
        vals = finish(func, xmin, args=args, full_output=1, disp=disp)
        xmin = vals[0]
        Jmin = vals[1]
        if vals[-1] > 0:
            if disp:
                print("Warning: Final optimization did not succeed")
    if full_output:
        return xmin, Jmin, grid, Jout
    else:
        return xmin
def show_options(solver=None, method=None, disp=True):
    """
    Show documentation for additional options of optimization solvers.

    These are method-specific options that can be supplied through the
    ``options`` dict.

    Parameters
    ----------
    solver : str
        Type of optimization solver. One of 'minimize', 'minimize_scalar',
        'root', or 'linprog'.
    method : str, optional
        If not given, shows all methods of the specified solver. Otherwise,
        show only the options for the specified method. Valid values
        corresponds to methods' names of respective solver (e.g. 'BFGS' for
        'minimize').
    disp : bool, optional
        Whether to print the result rather than returning it.

    Returns
    -------
    text
        Either None (for disp=False) or the text string (disp=True)

    Notes
    -----
    The solver-specific methods are:

    `scipy.optimize.minimize`

    - :ref:`Nelder-Mead <optimize.minimize-neldermead>`
    - :ref:`Powell <optimize.minimize-powell>`
    - :ref:`CG <optimize.minimize-cg>`
    - :ref:`BFGS <optimize.minimize-bfgs>`
    - :ref:`Newton-CG <optimize.minimize-newtoncg>`
    - :ref:`L-BFGS-B <optimize.minimize-lbfgsb>`
    - :ref:`TNC <optimize.minimize-tnc>`
    - :ref:`COBYLA <optimize.minimize-cobyla>`
    - :ref:`SLSQP <optimize.minimize-slsqp>`
    - :ref:`dogleg <optimize.minimize-dogleg>`
    - :ref:`trust-ncg <optimize.minimize-trustncg>`

    `scipy.optimize.root`

    - :ref:`hybr <optimize.root-hybr>`
    - :ref:`lm <optimize.root-lm>`
    - :ref:`broyden1 <optimize.root-broyden1>`
    - :ref:`broyden2 <optimize.root-broyden2>`
    - :ref:`anderson <optimize.root-anderson>`
    - :ref:`linearmixing <optimize.root-linearmixing>`
    - :ref:`diagbroyden <optimize.root-diagbroyden>`
    - :ref:`excitingmixing <optimize.root-excitingmixing>`
    - :ref:`krylov <optimize.root-krylov>`
    - :ref:`df-sane <optimize.root-dfsane>`

    `scipy.optimize.minimize_scalar`

    - :ref:`brent <optimize.minimize_scalar-brent>`
    - :ref:`golden <optimize.minimize_scalar-golden>`
    - :ref:`bounded <optimize.minimize_scalar-bounded>`

    `scipy.optimize.linprog`

    - :ref:`simplex <optimize.linprog-simplex>`
    """
    import textwrap

    # Map solver name -> sequence of (method name, dotted implementation path).
    doc_routines = {
        'minimize': (
            ('bfgs', 'scipy.optimize.optimize._minimize_bfgs'),
            ('cg', 'scipy.optimize.optimize._minimize_cg'),
            ('cobyla', 'scipy.optimize.cobyla._minimize_cobyla'),
            ('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'),
            ('l-bfgs-b', 'scipy.optimize.lbfgsb._minimize_lbfgsb'),
            ('nelder-mead', 'scipy.optimize.optimize._minimize_neldermead'),
            ('newtoncg', 'scipy.optimize.optimize._minimize_newtoncg'),
            ('powell', 'scipy.optimize.optimize._minimize_powell'),
            ('slsqp', 'scipy.optimize.slsqp._minimize_slsqp'),
            ('tnc', 'scipy.optimize.tnc._minimize_tnc'),
            ('trust-ncg', 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'),
        ),
        'root': (
            ('hybr', 'scipy.optimize.minpack._root_hybr'),
            ('lm', 'scipy.optimize._root._root_leastsq'),
            ('broyden1', 'scipy.optimize._root._root_broyden1_doc'),
            ('broyden2', 'scipy.optimize._root._root_broyden2_doc'),
            ('anderson', 'scipy.optimize._root._root_anderson_doc'),
            ('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'),
            ('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'),
            ('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'),
            ('krylov', 'scipy.optimize._root._root_krylov_doc'),
            ('df-sane', 'scipy.optimize._spectral._root_df_sane'),
        ),
        'linprog': (
            ('simplex', 'scipy.optimize._linprog._linprog_simplex'),
        ),
        'minimize_scalar': (
            ('brent', 'scipy.optimize.optimize._minimize_scalar_brent'),
            ('bounded', 'scipy.optimize.optimize._minimize_scalar_bounded'),
            ('golden', 'scipy.optimize.optimize._minimize_scalar_golden'),
        ),
    }

    if solver is None:
        # No solver given: recurse over every solver and concatenate.
        parts = ["\n\n\n========\n", "minimize\n", "========\n"]
        parts.append(show_options('minimize', disp=False))
        parts.extend(["\n\n===============\n", "minimize_scalar\n",
                      "===============\n"])
        parts.append(show_options('minimize_scalar', disp=False))
        parts.extend(["\n\n\n====\n", "root\n",
                      "====\n"])
        parts.append(show_options('root', disp=False))
        parts.extend(['\n\n\n=======\n', 'linprog\n',
                      '=======\n'])
        parts.append(show_options('linprog', disp=False))
        text = "".join(parts)
    else:
        solver = solver.lower()
        if solver not in doc_routines:
            raise ValueError('Unknown solver %r' % (solver,))

        if method is None:
            # No method given: recurse over every method of this solver.
            parts = []
            for name, _ in doc_routines[solver]:
                parts.extend(["\n\n" + name, "\n" + "=" * len(name) + "\n\n"])
                parts.append(show_options(solver, name, disp=False))
            text = "".join(parts)
        else:
            methods = dict(doc_routines[solver])
            if method not in methods:
                raise ValueError("Unknown method %r" % (method,))
            # Resolve the dotted path to the implementation function and
            # pull its docstring.
            mod_name, _, attr = methods[method].rpartition('.')
            __import__(mod_name)
            doc = getattr(sys.modules[mod_name], attr).__doc__
            text = textwrap.dedent(doc).strip() if doc is not None else ""

    if disp:
        print(text)
        return
    else:
        return text
def main():
    """Ad-hoc benchmark: time several minimizers on the Rosenbrock function."""
    import time
    times = []
    algor = []
    x0 = [0.8, 1.2, 0.7]

    def run(title, label, solve):
        # Print a section header, time one solver run (including printing
        # its result, as the original did), and record the timing.
        print(title)
        print("=" * len(title))
        start = time.time()
        print(solve())
        times.append(time.time() - start)
        algor.append(label)
        print()

    run("Nelder-Mead Simplex", 'Nelder-Mead Simplex\t',
        lambda: fmin(rosen, x0))
    run("Powell Direction Set Method", 'Powell Direction Set Method.',
        lambda: fmin_powell(rosen, x0))
    run("Nonlinear CG", 'Nonlinear CG \t',
        lambda: fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200))
    run("BFGS Quasi-Newton", 'BFGS Quasi-Newton\t',
        lambda: fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80))
    run("BFGS approximate gradient", 'BFGS without gradient\t',
        lambda: fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100))
    run("Newton-CG with Hessian product", 'Newton-CG with hessian product',
        lambda: fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod,
                         maxiter=80))
    run("Newton-CG with full Hessian", 'Newton-CG with full hessian',
        lambda: fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80))

    # Summary table of the recorded timings.
    print("\nMinimizing the Rosenbrock function of order 3\n")
    print(" Algorithm \t\t\t Seconds")
    print("===========\t\t\t =========")
    for k in range(len(algor)):
        print(algor[k], "\t -- ", times[k])
# Allow running this module directly as a quick solver benchmark.
if __name__ == "__main__":
    main()
| nvoron23/scipy | scipy/optimize/optimize.py | Python | bsd-3-clause | 94,975 | [
"Gaussian"
] | b1f7a3bcde46f64c2865cd8f414ab572e54c5da6e5c1fbc76bcf6a3061113046 |
'''
Created on 2015-01-19
Unittest for assorted PyGeoDat components not included elsewhere.
@author: Andre R. Erler, GPL v3
'''
import unittest
import numpy as np
import os, sys, gc
import multiprocessing
import logging
from time import sleep
# import geodata modules
from utils.nctools import writeNetCDF
from geodata.misc import isZero, isOne, isEqual
from geodata.base import Variable, Axis, Dataset, Ensemble
from datasets.common import data_root
# import modules to be tested
# RAM disk settings ("global" variable)
RAM = True # whether or not to use a RAM disk
ramdisk = '/media/tmp/' # folder where RAM disk is mounted
NP = 2
ldebug = False
## tests for multiprocess module
class MultiProcessTest(unittest.TestCase):
    """Unittests for the parallelization helpers in processing.multiprocess."""

    def setUp(self):
        ''' nothing to set up; each test builds its own data '''
        pass

    def tearDown(self):
        ''' clean up '''
        gc.collect()  # force a collection pass between tests

    def testApplyAlongAxis(self):
        ''' test parallelized version of Numpy's apply_along_axis '''
        from processing.multiprocess import apply_along_axis, test_aax, test_noaax
        import functools

        def run_test(fct, kw=0, axis=1, laax=True):
            # Bind the keyword argument so the function only takes a 1-D slice.
            ff = functools.partial(fct, kw=kw)
            shape = (500,100)
            data = np.arange(np.prod(shape), dtype='float').reshape(shape)
            assert data.shape == shape
            # parallel implementation using my wrapper
            # NOTE(review): ldebug=True is hard-coded here and ignores the
            # module-level ldebug flag -- confirm this is intentional.
            pres = apply_along_axis(ff, axis, data, NP=2, ldebug=True, laax=laax)
            print(pres.shape)
            assert pres.shape == data.shape
            # Per these assertions, each slice ends up with mean -kw and
            # unit standard deviation.
            assert isZero(pres.mean(axis=axis)+kw) and isZero(pres.std(axis=axis)-1.)
            # straight-forward numpy version (reference result)
            res = np.apply_along_axis(ff, axis, data)
            assert res.shape == data.shape
            assert isZero(res.mean(axis=axis)+kw) and isZero(res.std(axis=axis)-1.)
            # final test: parallel and serial results must agree
            assert isEqual(pres, res)
        # run tests
        run_test(test_noaax, kw=1, laax=False) # without Numpy's apply_along_axis
        run_test(test_aax, kw=1, laax=True) # Numpy's apply_along_axis

    def testAsyncPool(self):
        ''' test asyncPool wrapper '''
        from processing.multiprocess import asyncPoolEC, test_func_dec, test_func_ec
        args = [(n,) for n in range(5)]
        kwargs = dict(wait=1)
        # well-behaved function: expected exit code 0
        ec = asyncPoolEC(test_func_dec, args, kwargs, NP=NP, ldebug=ldebug, ltrialnerror=True)
        assert ec == 0
        # failing function with error trapping: expected exit code 4
        ec = asyncPoolEC(test_func_ec, args, kwargs, NP=NP, ldebug=ldebug, ltrialnerror=True)
        assert ec == 4
        # same failing function without error trapping: expected exit code 0
        ec = asyncPoolEC(test_func_ec, args, kwargs, NP=NP, ldebug=ldebug, ltrialnerror=False)
        assert ec == 0
## tests related to loading datasets
class DatasetsTest(unittest.TestCase):
    def setUp(self):
        ''' nothing to set up; each test builds its own data '''
        pass
    def tearDown(self):
        ''' clean up '''
        gc.collect()  # force a collection pass between tests
    def testExpArgList(self):
        ''' test function to expand argument lists '''
        from datasets.common import expandArgumentList
        # test arguments: three expandable lists and two static values
        args1 = [0,1,2]; args2 = ['0','1','2']; args3 = ['test']*3; arg4 = 'static1'; arg5 = 'static2'
        explist = ['arg1','arg2','arg3']
        # test inner product expansion: lists are zipped element-wise,
        # so the result has as many entries as each input list
        arg_list = expandArgumentList(arg1=args1, arg2=args2, arg3=args3, arg4=arg4, arg5=arg5,
                                      expand_list=explist, lproduct='inner')
        assert len(arg_list) == len(args1) and len(arg_list) == len(args2)
        for args,arg1,arg2,arg3 in zip(arg_list,args1,args2,args3):
            assert args['arg1'] == arg1
            assert args['arg2'] == arg2
            assert args['arg3'] == arg3
            assert args['arg4'] == arg4  # static values are replicated
            assert args['arg5'] == arg5
        # test outer product expansion: all combinations of the three lists
        arg_list = expandArgumentList(arg1=args1, arg2=args2, arg3=args3, arg4=arg4, arg5=arg5,
                                      expand_list=explist, lproduct='outer')
        assert len(arg_list) == len(args1) * len(args2) * len(args3)
        n = 0
        # verify the expected nesting order: arg1 outermost, arg3 innermost
        for arg1 in args1:
            for arg2 in args2:
                for arg3 in args3:
                    args = arg_list[n]
                    assert args['arg1'] == arg1
                    assert args['arg2'] == arg2
                    assert args['arg3'] == arg3
                    assert args['arg4'] == arg4
                    assert args['arg5'] == arg5
                    n += 1
        assert n == len(arg_list)
        # test simultaneous inner and outer product expansion:
        # the inner list (arg1) must match the length of the outer product,
        # so replicate it accordingly (true division yields a float -> int())
        n1 = len(args2) * len(args3) / len(args1)
        tmp1 = args1*int(n1)
        arg_list = expandArgumentList(arg1=tmp1, arg2=args2, arg3=args3, arg4=arg4, arg5=arg5,
                                      outer_list=['arg2','arg3'], inner_list=['arg1'])
        assert len(arg_list) == len(args2) * len(args3) == len(tmp1)
        n = 0
        for arg2 in args2:
            for arg3 in args3:
                args = arg_list[n]
                assert args['arg1'] == tmp1[n]  # inner list advances with n
                assert args['arg2'] == arg2
                assert args['arg3'] == arg3
                assert args['arg4'] == arg4
                assert args['arg5'] == arg5
                n += 1
        assert n == len(arg_list)
        # test parallel outer product expansion: arg1 and arg2 advance in
        # lock-step (as one tuple in expand_list), crossed with arg3
        assert len(args1) == len(args2) # necessary for test
        arg_list = expandArgumentList(arg1=args1, arg2=args2, arg3=args3, arg4=arg4, arg5=arg5,
                                      expand_list=[('arg1','arg2'),'arg3'], lproduct='outer')
        assert len(arg_list) == len(args1) * len(args3)
        n = 0
        for arg1,arg2 in zip(args1,args2):
            for arg3 in args3:
                args = arg_list[n]
                assert args['arg1'] == arg1
                assert args['arg2'] == arg2
                assert args['arg3'] == arg3
                assert args['arg4'] == arg4
                assert args['arg5'] == arg5
                n += 1
        assert n == len(arg_list)
# def testLoadDataset(self):
# ''' test universal dataset loading function '''
# from datasets.common import loadDataset
# from projects.WesternCanada.WRF_experiments import WRF_exps
# # test climtology
# ds = loadClim(name='PCIC', grid='arb2_d02', varlist=['precip'])
# assert isinstance(ds, Dataset)
# assert ds.name == 'PCIC'
# assert 'precip' in ds
# assert ds.gdal and ds.isProjected
# # test CVDP
# ds = loadDataset(name='HadISST', period=None, varlist=None, mode='CVDP', exps=WRF_exps)
# assert isinstance(ds, Dataset)
# assert ds.name == 'HadISST'
# assert 'PDO' in ds
# assert ds.gdal and not ds.isProjected
# # test CVDP with WRF
# ds = loadDataset(name='phys-ens-2100', period=None, varlist=None, mode='CVDP', exps=WRF_exps)
# assert isinstance(ds, Dataset)
# assert ds.name == 'phys-ens-2100'
# assert 'PDO' in ds
# assert ds.gdal and not ds.isProjected
# # test WRF station time-series
# ds = loadStnTS(name='ctrl-1_d02', varlist=['MaxPrecip_1d'], station='ecprecip', filetypes='hydro')
# assert isinstance(ds, Dataset)
# assert ds.name == 'ctrl-1_d02'
# assert 'MaxPrecip_1d' in ds
# # test example with list expansion
# # test EC station time-series
# dss = loadStnTS(name=['EC','ctrl-1'], varlist=['MaxPrecip_1d','precip'],
# station='ecprecip', filetypes='hydro',
# load_list=['name','varlist'], lproduct='outer')
# assert len(dss) == 4
# assert isinstance(ds, Dataset)
# assert dss[1].name == 'EC' and dss[2].name == 'ctrl-1'
# assert 'MaxPrecip_1d' in dss[0] and 'precip' in dss[1]
# assert 'MaxPrecip_1d' not in dss[3] and 'precip' not in dss[2]
# def testBasicLoadEnsembleTS(self):
# ''' test station data load functions (ensemble and list) '''
# from datasets.common import loadEnsembleTS, loadEnsembles
# # test list expansion of ensembles loading
# names = ['EC', 'phys-ens']; varlist = ['MaxPrecip_1d']
# prov = ['BC','AB']; season = ['summer','winter']; aggregation = ['max','min']
# constraints = dict(min_len=50, lat=(50,55), max_zerr=300, prov=('AB','BC'))
# enslst = loadEnsembles(names=names, prov=prov, season=season, aggregation=aggregation, station='ecprecip',
# constraints=constraints, varlist=varlist, filetypes=['hydro'], domain=2,
# load_list=[('aggregation','season'),'prov',], lproduct='outer', lwrite=False,
# lensembleAxis=True)
# assert len(enslst) == 4
# assert all(isinstance(ens, Ensemble) for ens in enslst)
# assert all(ens.basetype.__name__ == 'Dataset' for ens in enslst)
# assert all(ens.hasVariable(varlist[0]) for ens in enslst)
# assert all(ens.hasAxis('ensemble') for ens in enslst)
# assert all('EC' in ens for ens in enslst)
# # test simple ensemble with basins
# names = ['GPCC', 'phys-ens_d01','max-ens-2100']; varlist = ['precip']
# aggregation = None; slices = dict(shape_name='ARB'); obsslices = dict(years=(1939,1945))
# shpens = loadEnsembleTS(names=names, season=None, shape='shpavg', aggregation=aggregation,
# slices=slices, varlist=varlist, filetypes=['hydro'], obsslices=obsslices)
# assert isinstance(shpens, Ensemble)
# assert shpens.basetype.__name__ == 'Dataset'
# assert all(shpens.hasVariable(varlist[0]))
# assert names[0] in shpens
# assert len(shpens[names[0]].time) == 72 # time-series
# assert len(shpens[names[-1]].time) == 720 # ensemble
# assert all('ARB' == ds.atts.shape_name for ds in shpens)
# def testAdvancedLoadEnsembleTS(self):
# ''' test station data load functions (ensemble and list) '''
# from datasets.common import loadEnsembleTS
# lwrite = False
# # test ensemble (inner) list expansion
# names = 'CRU'; varlist = ['precip']; slices = dict(shape_name='FRB');
# obsslices = [dict(years=(1914,1918)), dict(years=(1939,1945))]
# name_tags = ['_1914','_1939']
# shpens = loadEnsembleTS(names=names, shape='shpavg', name_tags=name_tags, obsslices=obsslices,
# slices=slices, varlist=varlist, filetypes=['hydro'],
# aggregation=None, season=None,
# ensemble_list=['obsslices', 'name_tags'])
# assert isinstance(shpens, Ensemble)
# assert shpens.basetype.__name__ == 'Dataset'
# assert all(shpens.hasVariable(varlist[0]))
# assert all('CRU' == ds.name[:3] for ds in shpens)
# assert len(shpens['CRU_1914'].time) == 48 # time-series
# assert len(shpens['CRU_1939'].time) == 72 # time-series
# assert all('FRB' == ds.atts.shape_name for ds in shpens)
# # test ensemble (inner) list expansion with outer list expansion
# varlist = ['MaxPrecip_1d']; constraints = dict(min_len=50, lat=(50,55), max_zerr=300,)
# # inner expansion
# names = ['EC', 'EC', 'erai-max']; name_tags = ['_1990','_1940','WRF_1990']
# obsslices = [dict(years=(1929,1945)), dict(years=(1979,1995)), dict()]
# # outer expansion
# prov = ['BC','AB']; season = ['summer','winter']; mode = ['max']
# # load data
# enslst = loadEnsembleTS(names=names, prov=prov, season=season, mode=mode, station='ecprecip',
# constraints=constraints, name_tags=name_tags, obsslices=obsslices,
# domain=2, filetypes=['hydro'], varlist=varlist, ensemble_product='inner',
# ensemble_list=['names','name_tags','obsslices',], lwrite=lwrite,
# load_list=['mode','season','prov',], lproduct='outer',)
# assert len(enslst) == 4
# assert all(isinstance(ens, Ensemble) for ens in enslst)
# assert all(ens.basetype.__name__ == 'Dataset' for ens in enslst)
# assert all(ens.hasVariable(varlist[0]) for ens in enslst)
# assert all('EC_1990' in ens for ens in enslst)
# assert all('EC_1940' in ens for ens in enslst)
# assert all('WRF_1990' in ens for ens in enslst)
# # add CVDP data
# cvdp = loadEnsembleTS(names=names, prov=prov, season=season, mode=mode,
# name_tags=name_tags, obsslices=obsslices,
# varlist=['PDO'], ensemble_product='inner',
# ensemble_list=['names','name_tags','obsslices',], lwrite=lwrite,
# load_list=['mode','season','prov',], lproduct='outer',
# dataset_mode='CVDP')
# assert all(ens.hasVariable('PDO') for ens in enslst)
# # add PDO time-series to datasets
# for ts,cv in zip(enslst,cvdp):
# ts.addVariable(cv.PDO, lautoTrim=True)
# all(ens.hasVariable('PDO') for ens in enslst)
# # test slicing by PDO
# ds = enslst[0]['WRF_1990']
# assert ds(PDO=(-1,0.), lminmax=True)
# ## some debugging test
# # NetCDF datasets to add cluster_id to
# wrfensnc = ['max-ctrl','max-ens-A','max-ens-B','max-ens-C', # Ensembles don't have unique NetCDF files
# 'max-ctrl-2050','max-ens-A-2050','max-ens-B-2050','max-ens-C-2050',
# 'max-ctrl-2100','max-ens-A-2100','max-ens-B-2100','max-ens-C-2100',]
# wrfensnc = loadEnsembleTS(names=wrfensnc, name='WRF_NC', title=None, varlist=None,
# station='ecprecip', filetypes=['hydro'], domain=2, lwrite=lwrite)
# # climatology
# constraints = dict()
# constraints['min_len'] = 10 # for valid climatology
# constraints['lat'] = (45,60)
# #constraints['max_zerr'] = 100 # can't use this, because we are loading EC data separately from WRF
# constraints['prov'] = ('BC','AB')
# wrfens = loadEnsembleTS(names=['max-ens','max-ens-2050','max-ens-2100'], name='WRF', title=None,
# varlist=None,
# aggregation='mean', station='ecprecip', constraints=constraints, filetypes=['hydro'],
# domain=2, lwrite=False)
# wrfens = wrfens.copy(asNC=False) # read-only DatasetNetCDF can't add new variables (not as VarNC, anyway...)
# # gevens = [ens.fitDist(lflatten=True, axis=None) for ens in enslst]
# # print(''); print(gevens[0][0])
def testLoadStandardDeviation(self):
    """Load a shape-averaged CRU ensemble with SEM aggregation.

    The call itself is the test: it used to trigger a bug in Numpy's
    nanfunctions.py, so completing without an exception counts as a pass.
    """
    from datasets.common import loadEnsembleTS
    # restrict to the Fraser River Basin over a fixed 15-year window
    subset = {'shape_name': 'FRB', 'years': (1979, 1994)}
    loadEnsembleTS(names='CRU', shape='shpavg', varlist=['precip'],
                   aggregation='SEM', season=None, slices=subset,
                   ldataset=True)
    # N.B.: a patched nanfunctions.py fixes the underlying problem:
    #   /home/data/Enthought/EPD/lib/python2.7/site-packages/numpy/lib/nanfunctions.py
    #   -> /home/data/Code/PyGeoData/src/utils/nanfunctions.py
    # Diff first, though, to check for actual upstream updates!
    # (at the moment the custom nanfunctions module is imported directly)
if __name__ == "__main__":

    # Optionally restrict the run to individual test methods; each entry is
    # the suffix of a test method name (without the leading 'test').
    specific_tests = []
#    specific_tests += ['ApplyAlongAxis']
#    specific_tests += ['AsyncPool']
#    specific_tests += ['ExpArgList']
#    specific_tests += ['LoadStandardDeviation']
    ## N.B.: these three tests are currently commented out and need to be revised completely;
    ## most of the dataset/ensemble loading functionality is now handled in the Projects repo
#    specific_tests += ['LoadDataset']
#    specific_tests += ['BasicLoadEnsembleTS']
#    specific_tests += ['AdvancedLoadEnsembleTS']

    # list of test suites to be performed (base names, without the 'Test' suffix)
    tests = []
    # list of variable tests
    tests += ['MultiProcess']
    tests += ['Datasets']

    # construct dictionary of test classes defined above (class names end in 'Test')
    test_classes = dict()
    local_values = locals().copy()
    for key, val in local_values.items():
        if key.endswith('Test'):
            test_classes[key[:-4]] = val

    # run tests: either only the requested methods of each suite, or the full suite
    report = []
    for test in tests:  # test+'.test'+specific_test
        if len(specific_tests) > 0:
            test_names = ['misc_test.' + test + 'Test.test' + s_t for s_t in specific_tests]
            s = unittest.TestLoader().loadTestsFromNames(test_names)
        else:
            s = unittest.TestLoader().loadTestsFromTestCase(test_classes[test])
        report.append(unittest.TextTestRunner(verbosity=2).run(s))

    # tally results across all suites and print a summary
    runs = 0; errs = 0; fails = 0
    for name, test in zip(tests, report):
        runs += test.testsRun
        e = len(test.errors)
        errs += e
        f = len(test.failures)
        fails += f
        if e + f != 0: print(("\nErrors in '{:s}' Tests: {:s}".format(name, str(test))))
    if errs + fails == 0:
        # typo fix: "successfull" -> "successful"
        print(("\n *** All {:d} Test(s) successful!!! *** \n".format(runs)))
    else:
        print(("\n ### Test Summary: ### \n" +
               " ### Ran {:2d} Test(s) ### \n".format(runs) +
               " ### {:2d} Failure(s) ### \n".format(fails) +
               " ### {:2d} Error(s) ### \n".format(errs)))
| aerler/GeoPy | src/misc_test.py | Python | gpl-3.0 | 16,727 | [
"NetCDF"
] | 628196e21dc09c942709d8bb60b91b9b5425ecd20f7a82de4e4f2804667c13b8 |
from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from skbio.io import FileFormatError
class SequenceCollectionError(Exception):
    """Raised when validation of a sequence collection fails."""
class AlignmentError(SequenceCollectionError):
    """Raised when validation of an alignment fails."""
class StockholmParseError(FileFormatError):
    """Raised when a Stockholm formatted file cannot be parsed."""
| Kleptobismol/scikit-bio | skbio/alignment/_exception.py | Python | bsd-3-clause | 828 | [
"scikit-bio"
] | 04d3868263cba1afca60a4d34b84be1666ca7a6f37d0d4b93d141685b40ca030 |
#!/usr/bin/python3
# Copyright 2016-2018 Brian Warner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# Reset a stuck status
#
# The facade-worker.py script will only run if the status in the db is idle. If
# it is killed halfway through, this can leave it in an erroneous state. This
# script resets it. Only run it if you know no analysis is actually running,
# otherwise you'll thrash your machine.
# sys is required for sys.exit() in the error handler below; the original
# omitted this import, so a failed DB setup raised a NameError inside the
# except block instead of printing the intended message.
import sys

import MySQLdb
import configparser

try:
    config = configparser.ConfigParser()
    config.read('db.cfg')

    # Read in the database connection info
    db_user = config['main_database']['user']
    db_pass = config['main_database']['pass']
    db_name = config['main_database']['name']
    db_host = config['main_database']['host']

    db = MySQLdb.connect(
        host = db_host,
        user = db_user,
        passwd = db_pass,
        db = db_name,
        charset = 'utf8mb4')

    cursor = db.cursor(MySQLdb.cursors.DictCursor)

# narrowed from a bare "except:" so Ctrl-C (KeyboardInterrupt) still works;
# any config/connection failure means setup.py was probably never run
except Exception:
    sys.exit("It appears you haven't run setup.py yet. Please do this.")

# Force the utility status back to Idle so facade-worker.py will run again.
query = "UPDATE settings SET value='Idle' WHERE setting='utility_status'"

cursor.execute(query)
db.commit()

# Leave an audit trail recording that the status was reset manually.
query = ("INSERT INTO utility_log (level,status) VALUES "
         "('Error','facade-worker.py manually reset')")

cursor.execute(query)
db.commit()

cursor.close()
db.close()
| mkdolan/facade | utilities/reset-status.py | Python | apache-2.0 | 1,779 | [
"Brian"
] | 8447ea84deaab4e73917db8f846cddb562190b53b22f19b4f24548befd4f563f |
# [Apr 2018] stolen directly from scipy so I can get an array back
# https://github.com/scipy/scipy/blob/master/scipy/optimize/_hungarian.py
# Hungarian algorithm (Kuhn-Munkres) for solving the linear sum assignment
# problem. Taken from scikit-learn. Based on original code by Brian Clapper,
# adapted to NumPy by Gael Varoquaux.
# Further improvements by Ben Root, Vlad Niculae and Lars Buitinck.
#
# Copyright (c) 2008 Brian M. Clapper <bmc@clapper.org>, Gael Varoquaux
# Author: Brian M. Clapper, Gael Varoquaux
# License: 3-clause BSD
import numpy as np
def linear_sum_assignment(cost_matrix, return_cost=False):
    """Solve the linear sum assignment problem (Hungarian/Kuhn-Munkres).

    The problem is minimum-weight matching in a bipartite graph: given a
    cost matrix C where ``C[i, j]`` is the cost of assigning row (worker)
    ``i`` to column (job) ``j``, find a one-to-one assignment minimizing
    the total cost.  Rectangular matrices are supported; the smaller
    dimension is fully assigned.

    Parameters
    ----------
    cost_matrix : array
        2-D cost matrix of the bipartite graph (numeric or boolean).
    return_cost : bool, optional
        If True, also return a copy of the cost_matrix reduced to maximal
        zeros at the end of the Munkres algorithm.

    Returns
    -------
    row_ind, col_ind : array
        Row indices and corresponding column indices of the optimal
        assignment; the cost is ``cost_matrix[row_ind, col_ind].sum()``.
        Row indices are sorted (equal to ``numpy.arange(n)`` for a square
        matrix).
    (row_ind, col_ind), cost
        Only provided if `return_cost` is True.

    Raises
    ------
    ValueError
        If the input is not 2-D, is non-numeric, or contains NaN/inf.

    References
    ----------
    1. http://csclab.murraystate.edu/bob.pilgrim/445/munkres.html
    2. Harold W. Kuhn. The Hungarian Method for the assignment problem.
       *Naval Research Logistics Quarterly*, 2:83-97, 1955.
    3. Munkres, J. Algorithms for the Assignment and Transportation Problems.
       *J. SIAM*, 5(1):32-38, March, 1957.
    4. https://en.wikipedia.org/wiki/Hungarian_algorithm

    Examples
    --------
    >>> cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]])
    >>> row_ind, col_ind = linear_sum_assignment(cost)
    >>> int(cost[row_ind, col_ind].sum())
    5
    """
    cost_matrix = np.asarray(cost_matrix)
    if len(cost_matrix.shape) != 2:
        raise ValueError("expected a matrix (2-d array), got a %r array"
                         % (cost_matrix.shape,))

    # FIX: the deprecated aliases np.bool/np.int were removed in NumPy 1.24;
    # they were plain aliases for the builtins, which are used here instead.
    if not (np.issubdtype(cost_matrix.dtype, np.number) or
            cost_matrix.dtype == np.dtype(bool)):
        raise ValueError("expected a matrix containing numerical entries, got %s"
                         % (cost_matrix.dtype,))

    if np.any(np.isinf(cost_matrix) | np.isnan(cost_matrix)):
        raise ValueError("matrix contains invalid numeric entries")

    if cost_matrix.dtype == np.dtype(bool):
        cost_matrix = cost_matrix.astype(int)

    # The algorithm expects more columns than rows in the cost matrix.
    if cost_matrix.shape[1] < cost_matrix.shape[0]:
        cost_matrix = cost_matrix.T
        transposed = True
    else:
        transposed = False

    state = _Hungary(cost_matrix)

    # No need to bother with assignments if one of the dimensions
    # of the cost matrix is zero-length.
    step = None if 0 in cost_matrix.shape else _step1

    while step is not None:
        step = step(state)

    if transposed:
        marked = state.marked.T
    else:
        marked = state.marked

    if return_cost:
        return np.where(marked == 1), state.C
    else:
        return np.where(marked == 1)


class _Hungary(object):
    """State of the Hungarian algorithm.

    Parameters
    ----------
    cost_matrix : 2D matrix
        The cost matrix. Must have shape[1] >= shape[0].
    """

    def __init__(self, cost_matrix):
        self.C = cost_matrix.copy()

        n, m = self.C.shape
        self.row_uncovered = np.ones(n, dtype=bool)
        self.col_uncovered = np.ones(m, dtype=bool)
        self.Z0_r = 0
        self.Z0_c = 0
        self.path = np.zeros((n + m, 2), dtype=int)
        self.marked = np.zeros((n, m), dtype=int)

    def _clear_covers(self):
        """Clear all covered matrix cells"""
        self.row_uncovered[:] = True
        self.col_uncovered[:] = True


# Individual steps of the algorithm follow, as a state machine: they return
# the next step to be taken (function to be called), if any.

def _step1(state):
    """Steps 1 and 2 in the Wikipedia page."""
    # Step 1: For each row of the matrix, find the smallest element and
    # subtract it from every element in its row.
    state.C -= state.C.min(axis=1)[:, np.newaxis]
    # Step 2: Find a zero (Z) in the resulting matrix. If there is no
    # starred zero in its row or column, star Z. Repeat for each element
    # in the matrix.
    for i, j in zip(*np.where(state.C == 0)):
        if state.col_uncovered[j] and state.row_uncovered[i]:
            state.marked[i, j] = 1
            state.col_uncovered[j] = False
            state.row_uncovered[i] = False

    state._clear_covers()
    return _step3


def _step3(state):
    """
    Cover each column containing a starred zero. If n columns are covered,
    the starred zeros describe a complete set of unique assignments.
    In this case, Go to DONE, otherwise, Go to Step 4.
    """
    marked = (state.marked == 1)
    state.col_uncovered[np.any(marked, axis=0)] = False

    if marked.sum() < state.C.shape[0]:
        return _step4


def _step4(state):
    """
    Find a noncovered zero and prime it. If there is no starred zero
    in the row containing this primed zero, Go to Step 5. Otherwise,
    cover this row and uncover the column containing the starred
    zero. Continue in this manner until there are no uncovered zeros
    left. Save the smallest uncovered value and Go to Step 6.
    """
    # We convert to int as numpy operations are faster on int
    C = (state.C == 0).astype(int)
    covered_C = C * state.row_uncovered[:, np.newaxis]
    covered_C *= np.asarray(state.col_uncovered, dtype=int)
    n = state.C.shape[0]
    m = state.C.shape[1]

    while True:
        # Find an uncovered zero
        row, col = np.unravel_index(np.argmax(covered_C), (n, m))
        if covered_C[row, col] == 0:
            return _step6
        else:
            state.marked[row, col] = 2
            # Find the first starred element in the row
            star_col = np.argmax(state.marked[row] == 1)
            if state.marked[row, star_col] != 1:
                # Could not find one
                state.Z0_r = row
                state.Z0_c = col
                return _step5
            else:
                col = star_col
                state.row_uncovered[row] = False
                state.col_uncovered[col] = True
                covered_C[:, col] = C[:, col] * (
                    np.asarray(state.row_uncovered, dtype=int))
                covered_C[row] = 0


def _step5(state):
    """
    Construct a series of alternating primed and starred zeros as follows.
    Let Z0 represent the uncovered primed zero found in Step 4.
    Let Z1 denote the starred zero in the column of Z0 (if any).
    Let Z2 denote the primed zero in the row of Z1 (there will always be one).
    Continue until the series terminates at a primed zero that has no starred
    zero in its column. Unstar each starred zero of the series, star each
    primed zero of the series, erase all primes and uncover every line in the
    matrix. Return to Step 3
    """
    count = 0
    path = state.path
    path[count, 0] = state.Z0_r
    path[count, 1] = state.Z0_c

    while True:
        # Find the first starred element in the col defined by
        # the path.
        row = np.argmax(state.marked[:, path[count, 1]] == 1)
        if state.marked[row, path[count, 1]] != 1:
            # Could not find one
            break
        else:
            count += 1
            path[count, 0] = row
            path[count, 1] = path[count - 1, 1]

        # Find the first prime element in the row defined by the
        # first path step
        col = np.argmax(state.marked[path[count, 0]] == 2)
        if state.marked[row, col] != 2:
            col = -1
        count += 1
        path[count, 0] = path[count - 1, 0]
        path[count, 1] = col

    # Convert paths
    for i in range(count + 1):
        if state.marked[path[i, 0], path[i, 1]] == 1:
            state.marked[path[i, 0], path[i, 1]] = 0
        else:
            state.marked[path[i, 0], path[i, 1]] = 1

    state._clear_covers()
    # Erase all prime markings
    state.marked[state.marked == 2] = 0
    return _step3


def _step6(state):
    """
    Add the value found in Step 4 to every element of each covered row,
    and subtract it from every element of each uncovered column.
    Return to Step 4 without altering any stars, primes, or covered lines.
    """
    # the smallest uncovered value in the matrix
    if np.any(state.row_uncovered) and np.any(state.col_uncovered):
        minval = np.min(state.C[state.row_uncovered], axis=0)
        minval = np.min(minval[state.col_uncovered])
        state.C[~state.row_uncovered] += minval
        state.C[:, state.col_uncovered] -= minval
    return _step4
| amjames/psi4 | psi4/driver/qcdb/util/scipy_hungarian.py | Python | lgpl-3.0 | 10,333 | [
"Brian"
] | 6dab6a8c6029427a2264afdfef37778f3c55590f4f3405abacf81bccc848e754 |
"""Additional docstring for the Transformation module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
MODULE = "DIRAC.TransformationSystem.Client.Transformation"
class CustomTransformation(object):  # pylint: disable=too-few-public-methods
    """Build and register the docstring addition for the Transformation module.

    Instantiating this class renders a restructured-text section listing every
    parameter accepted by ``Transformation`` with its default value, and the
    instance is registered under ``MODULE`` at the bottom of this file.
    """

    def __init__(self):
        """Create a string containing restructured-text documentation for all
        the special transformation parameters.

        :param str module: full path to the module
        :param bool replace: whether to replace the full docstring or not;
           if you replace it completely add the automodule commands etc.!
        """
        self.module = MODULE
        self.replace = False
        self.doc_string = ""
        # Fill the docstring addition with what we want to add
        self.doc_string += textwrap.dedent(
            """
            Transformation Parameters
            -------------------------
            Any parameter with ``ParameterName`` can be set for a transformation with a call
            to ``setParameterName(parameterValue)``.
            The following parameters have a special meaning
            """
        )
        # imported here so merely importing this helper module does not pull in DIRAC
        from DIRAC.TransformationSystem.Client.Transformation import Transformation

        trans = Transformation()
        # one RST entry per known parameter, showing its current default value
        for paramName in sorted(trans.paramTypes):
            self.doc_string += "\n``%s``:\n Default value: %r" % (paramName, trans.paramValues[paramName])


# CUSTOMIZED_DOCSTRINGS is injected into this module's namespace by the
# documentation tooling that exec's this file, hence the pylint pragma.
CUSTOMIZED_DOCSTRINGS[MODULE] = CustomTransformation()  # pylint: disable=undefined-variable
| ic-hep/DIRAC | docs/diracdoctools/CustomizedDocs/CustomTransformation.py | Python | gpl-3.0 | 1,798 | [
"DIRAC"
] | 2094047b08d1eaac55d20f0a8c7f80b4a46bb9182a5210e752e25e0613bb6570 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
#
"""Fast distance array computation --- :mod:`MDAnalysis.lib.distances`
===================================================================
Fast C-routines to calculate distance arrays from coordinate
arrays. Many of the functions also exist in parallel versions, that
typically provide higher performance than the serial code.
The boolean attribute MDAnalysis.lib.distances.USED_OPENMP can be
checked to see if OpenMP was used in the compilation of MDAnalysis.
Selection of acceleration ("backend")
-------------------------------------
All functions take the optional keyword *backend*, which determines
the type of acceleration. Currently, the following choices are
implemented (*backend* is case-insensitive):
.. Table:: Available *backends* for accelerated distance functions.
========== ========================= ======================================
*backend* module description
========== ========================= ======================================
"serial" :mod:`c_distances` serial implementation in C/Cython
"OpenMP" :mod:`c_distances_openmp` parallel implementation in C/Cython
with OpenMP
========== ========================= ======================================
.. versionadded:: 0.13.0
Functions
---------
.. autofunction:: distance_array(reference, configuration [, box [, result [, backend]]])
.. autofunction:: self_distance_array(reference [, box [,result [, backend]]])
.. autofunction:: calc_bonds(atom1, atom2 [, box, [, result [, backend]]])
.. autofunction:: calc_angles(atom1, atom2, atom3 [,box [, result [, backend]]])
.. autofunction:: calc_dihedrals(atom1, atom2, atom3, atom4 [,box [, result [, backend]]])
.. autofunction:: apply_PBC(coordinates, box [, backend])
.. autofunction:: transform_RtoS(coordinates, box [, backend])
.. autofunction:: transform_StoR(coordinates, box [,backend])
"""
from __future__ import division, absolute_import
from six.moves import range
import numpy as np
from numpy.lib.utils import deprecate
from .mdamath import triclinic_vectors, triclinic_box
# hack to select backend with backend=<backend> kwarg. Note that
# the cython parallel code (prange) in parallel.distances is
# independent from the OpenMP code
import importlib
_distances = {}
_distances['serial'] = importlib.import_module(".c_distances",
package="MDAnalysis.lib")
try:
_distances['openmp'] = importlib.import_module(".c_distances_openmp",
package="MDAnalysis.lib")
except ImportError:
pass
del importlib
def _run(funcname, args=None, kwargs=None, backend="serial"):
    """Look up *funcname* in the selected *backend* module and call it.

    *backend* is case-insensitive; a ValueError listing the available
    backends is raised for an unknown one.
    """
    if args is None:
        args = tuple()
    if kwargs is None:
        kwargs = dict()
    backend = backend.lower()
    try:
        func = getattr(_distances[backend], funcname)
    except KeyError:
        raise ValueError("Function {0} not available with backend {1}; try one of: {2}".format(
            funcname, backend, ", ".join(_distances.keys())))
    return func(*args, **kwargs)
# serial versions are always available (and are typically used within
# the core and topology modules)
from .c_distances import (calc_distance_array,
calc_distance_array_ortho,
calc_distance_array_triclinic,
calc_self_distance_array,
calc_self_distance_array_ortho,
calc_self_distance_array_triclinic,
coord_transform,
calc_bond_distance,
calc_bond_distance_ortho,
calc_bond_distance_triclinic,
calc_angle,
calc_angle_ortho,
calc_angle_triclinic,
calc_dihedral,
calc_dihedral_ortho,
calc_dihedral_triclinic,
ortho_pbc,
triclinic_pbc)
from .c_distances_openmp import OPENMP_ENABLED as USED_OPENMP
def _box_check(box):
"""Take a box input and deduce what type of system it represents based
on the shape of the array and whether all angles are 90.
Parameters
----------
box : array
Box information of unknown format.
Returns
-------
boxtype : str
* ``ortho`` orthogonal box
* ``tri_vecs`` triclinic box vectors
* ``tri_box`` triclinic box lengths and angles
Raises
------
TypeError
If box is not float32.
ValueError
If box type not detected.
"""
if box.dtype != np.float32:
raise TypeError("Box must be of type float32")
boxtype = 'unknown'
if box.shape == (3,):
boxtype = 'ortho'
elif box.shape == (3, 3):
if np.all([box[0][1] == 0.0, # Checks that tri box is properly formatted
box[0][2] == 0.0,
box[1][2] == 0.0]):
boxtype = 'tri_vecs'
else:
boxtype = 'tri_vecs_bad'
elif box.shape == (6,):
if np.all(box[3:] == 90.):
boxtype = 'ortho'
else:
boxtype = 'tri_box'
if boxtype == 'unknown':
raise ValueError("box input not recognised"
", must be an array of box dimensions")
return boxtype
def _check_array(coords, desc):
"""Check an array is a valid array of coordinates
Must be:
(n,3) in shape
float32 data
"""
if (coords.ndim != 2 or coords.shape[1] != 3):
raise ValueError("{0} must be a sequence of 3 dimensional coordinates"
"".format(desc))
if coords.dtype != np.float32:
raise TypeError("{0} must be of type float32".format(desc))
def _check_results_array(results, size):
"""Check the results array is ok to use
Must be:
same shape as size
float64
"""
if results.shape != size:
raise ValueError("Result array has incorrect size,"
"should be {0}, got {1}".format(size, results.shape))
if results.dtype != np.float64:
raise TypeError("Results array must be of type float64")
def _check_lengths_match(*arrays):
"""Check all arrays are same shape"""
ref = arrays[0].shape
if not all( a.shape == ref for a in arrays):
raise ValueError("Input arrays must all be same shape"
"Got {0}".format([a.shape for a in arrays]))
def distance_array(reference, configuration, box=None, result=None, backend="serial"):
    """Calculate all pairwise distances between *reference* and *configuration*.

    For ``i`` reference positions and ``j`` configuration positions an
    ``i x j`` distance array is computed.  If *box* is supplied, the minimum
    image convention is applied; the dimensions must be given in the format
    returned by :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
    ``[lx, ly, lz, alpha, beta, gamma]``.

    Parameters
    ----------
    reference : numpy.array of numpy.float32
        Reference coordinate array.
    configuration : numpy.array of numpy.float32
        Configuration coordinate array.
    box : numpy.array or None
        Cell dimensions; if provided, minimum image convention is used.
    result : numpy.array of numpy.float64, optional
        Preallocated result array of shape ``(len(reference),
        len(configuration))`` and dtype float64; reusing it avoids
        reallocation on repeated calls.
    backend
        Type of acceleration; "serial" is always available, "OpenMP"
        optionally.

    Returns
    -------
    d : numpy.array
        ``(len(reference), len(configuration))`` array with ``d[i, j]`` the
        distance between reference coordinate `i` and configuration
        coordinate `j`.

    Note
    ----
    Internally copies of the reference and configuration arrays are made.

    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    """
    # the C routines require contiguous copies of the coordinates
    ref = reference.copy('C')
    conf = configuration.copy('C')

    _check_array(conf, 'conf')
    _check_array(ref, 'ref')

    if box is not None:
        boxtype = _box_check(box)
        # normalise [A, B, C, alpha, beta, gamma] to box vectors [[A], [B], [C]]
        if boxtype == 'tri_box':
            box = triclinic_vectors(box)
        if boxtype == 'tri_vecs_bad':
            box = triclinic_vectors(triclinic_box(box[0], box[1], box[2]))

    refnum = ref.shape[0]
    confnum = conf.shape[0]

    if result is None:
        distances = np.zeros((refnum, confnum), np.float64)
    else:
        _check_results_array(result, (refnum, confnum))
        distances = np.asarray(result)

    # dispatch to the appropriate C routine for the detected box type
    if box is None:
        _run("calc_distance_array",
             args=(ref, conf, distances),
             backend=backend)
    elif boxtype == 'ortho':
        _run("calc_distance_array_ortho",
             args=(ref, conf, box, distances),
             backend=backend)
    else:
        _run("calc_distance_array_triclinic",
             args=(ref, conf, box, distances),
             backend=backend)

    return distances
def self_distance_array(reference, box=None, result=None, backend="serial"):
    """Calculate all distances within a configuration *reference*.

    If *box* is supplied, the minimum image convention is applied; the
    dimensions must be given in the format returned by
    :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
    ``[lx, ly, lz, alpha, beta, gamma]``.

    Parameters
    ----------
    reference : array
        Reference coordinate array with ``N=len(ref)`` coordinates.
    box : array or None
        Cell dimensions; if provided, minimum image convention is used.
    result : array, optional
        Preallocated result array of shape ``(N*(N-1)/2,)`` and dtype
        ``numpy.float64``; reusing it avoids reallocation on repeated
        calls.
    backend
        Type of acceleration; "serial" is always available, "OpenMP"
        optionally.

    Returns
    -------
    d : array
        ``N*(N-1)/2`` 1D array of distances in condensed order:
        ``d[k]`` holds the distance between coordinates ``i`` and ``j``
        with ``k`` advancing over ``i < j``:

        .. code-block:: python

            for i in range(N):
                for j in range(i+1, N):
                    k += 1
                    dist[i,j] = d[k]

    Note
    ----
    Internally a copy of the coordinate array is made.

    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    """
    # the C routines require a contiguous copy of the coordinates
    ref = reference.copy('C')

    _check_array(ref, 'ref')

    # (removed dead local `with_PBC`, which was assigned but never used)
    if box is not None:
        boxtype = _box_check(box)
        # normalise [A, B, C, alpha, beta, gamma] to box vectors [[A], [B], [C]]
        if boxtype == 'tri_box':
            box = triclinic_vectors(box)
        if boxtype == 'tri_vecs_bad':
            box = triclinic_vectors(triclinic_box(box[0], box[1], box[2]))

    refnum = ref.shape[0]
    distnum = refnum * (refnum - 1) // 2

    if result is not None:
        _check_results_array(result, (distnum,))
        distances = np.asarray(result)
    else:
        distances = np.zeros((distnum,), np.float64)

    # dispatch to the appropriate C routine for the detected box type
    if box is not None:
        if boxtype == 'ortho':
            _run("calc_self_distance_array_ortho",
                 args=(ref, box, distances),
                 backend=backend)
        else:
            _run("calc_self_distance_array_triclinic",
                 args=(ref, box, distances),
                 backend=backend)
    else:
        _run("calc_self_distance_array",
             args=(ref, distances),
             backend=backend)

    return distances
def transform_RtoS(inputcoords, box, backend="serial"):
    """Transform an array of coordinates from real space to S space (aka
    lambda space).

    S space represents fractional space within the unit cell for this system.

    Reciprocal operation to :meth:`transform_StoR`.

    Parameters
    ----------
    inputcoords : array
        A n x 3 array of coordinate data, of type ``np.float32``.
    box : array
        The unitcell dimensions for this system.
        The dimensions must be provided in the same format as returned
        by :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`: ``[lx,
        ly, lz, alpha, beta, gamma]``.
    backend
        Select the type of acceleration; "serial" is always available. Other
        possibilities are "OpenMP" (OpenMP).

    Returns
    -------
    outcoords : array
        A n x 3 array of fractional coordinates.

    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    """
    # Work on a C-contiguous copy so the caller's array is never modified.
    coords = inputcoords.copy('C')
    boxtype = _box_check(box)
    # Convert [A,B,C,alpha,beta,gamma] to a 3x3 box matrix; orthogonal cells
    # become a diagonal matrix so the same transform code path handles both.
    if (boxtype == 'tri_box'):
        box = triclinic_vectors(box)
    if (boxtype == 'tri_vecs_bad'):
        box = triclinic_vectors(triclinic_box(box[0], box[1], box[2]))
    elif (boxtype == 'ortho'):
        box = np.array([[box[0], 0.0, 0.0],
                        [0.0, box[1], 0.0],
                        [0.0, 0.0, box[2]]], dtype=np.float32)
    # The inverse of the box matrix maps real -> fractional coordinates.
    # np.linalg.inv replaces the deprecated np.matrix(...).I (identical for a
    # square nonsingular matrix); the C backend needs C-ordered float32.
    inv = np.ascontiguousarray(np.linalg.inv(box), dtype=np.float32)
    _run("coord_transform",
         args=(coords, inv),
         backend=backend)
    return coords
def transform_StoR(inputcoords, box, backend="serial"):
    """Transform an array of coordinates from S space into real space.

    S space represents fractional space within the unit cell for this system.

    Reciprocal operation to :meth:`transform_RtoS`.

    Parameters
    ----------
    inputcoords : array
        A n x 3 array of coordinate data, of type np.float32
    box : array
        The unitcell dimensions for this system.
        The dimensions must be provided in the same format as returned
        by :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`: ``[lx,
        ly, lz, alpha, beta, gamma]``.
    backend
        Select the type of acceleration; "serial" is always available. Other
        possibilities are "OpenMP" (OpenMP).

    Returns
    -------
    outcoords : array
        A n x 3 array of fractional coordinates.

    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    """
    # Work on a C-contiguous copy so the caller's array is never modified.
    coords = inputcoords.copy('C')
    boxtype = _box_check(box)
    # Convert [A,B,C,alpha,beta,gamma] to a 3x3 box matrix; orthogonal cells
    # become a diagonal matrix so the same transform code path handles both.
    if (boxtype == 'tri_box'):
        box = triclinic_vectors(box)
    if (boxtype == 'tri_vecs_bad'):
        # Repair badly formed triclinic vectors before use; this mirrors
        # transform_RtoS and the calc_* functions, which all handle this
        # boxtype (previously the bad vectors were used unrepaired here).
        box = triclinic_vectors(triclinic_box(box[0], box[1], box[2]))
    elif (boxtype == 'ortho'):
        box = np.array([[box[0], 0.0, 0.0],
                        [0.0, box[1], 0.0],
                        [0.0, 0.0, box[2]]], dtype=np.float32)
    _run("coord_transform",
         args=(coords, box),
         backend=backend)
    return coords
def calc_bonds(coords1, coords2, box=None, result=None, backend="serial"):
    """
    Calculate all distances between a pair of atoms. *atom1* and *atom2* are both
    arrays of coordinates, where atom1[i] and atom2[i] represent a bond.

    In comparison to distance_array and self_distance_array which calculate distances
    between all combinations of coordinates, calc_bonds can be used to calculate distance
    between pairs of objects, similar to::

        numpy.linalg.norm(a - b) for a, b in zip(coords1, coords2)

    The optional argument *box* applies minimum image convention if supplied.
    *box* can be either orthogonal or triclinic

    If a 1D numpy array of dtype ``numpy.float64`` with ``len(atom1)`` elements is
    provided in *result* then this preallocated array is filled. This can speed
    up calculations.

        bondlengths = calc_bonds(coords1, coords2 [, box [,result=bondlengths]])

    Parameters
    ----------
    coords1 : array
        An array of coordinates for one half of the bond.
    coords2 : array
        An array of coordinates for the other half of bond
    box : array
        The unitcell dimensions for this system.
        The dimensions must be provided in the same format as returned
        by :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`: ``[lx,
        ly, lz, alpha, beta, gamma]``.
    result : array, optional
        Preallocated result array which must be same length as coord
        arrays and ``dtype=numpy.float64``. Avoids creating the
        array which saves time when the function is called repeatedly. [None]
    backend
        Select the type of acceleration; "serial" is always available. Other
        possibilities are "OpenMP" (OpenMP).

    Returns
    -------
    bondlengths : array
        The length between each pair in coords1 and coords2

    .. versionadded:: 0.8
    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    """
    # C-contiguous copies keep the C backends happy and leave inputs untouched.
    atom1 = coords1.copy('C')
    atom2 = coords2.copy('C')
    _check_array(atom1, 'atom1')
    _check_array(atom2, 'atom2')
    _check_lengths_match(atom1, atom2)
    if box is not None:
        boxtype = _box_check(box)
        # Convert [A,B,C,alpha,beta,gamma] to [[A],[B],[C]]
        if (boxtype == 'tri_box'):
            box = triclinic_vectors(box)
        if (boxtype == 'tri_vecs_bad'):
            box = triclinic_vectors(triclinic_box(box[0], box[1], box[2]))
    numatom = atom1.shape[0]
    # Reuse the caller-supplied output buffer when given, otherwise allocate.
    if result is not None:
        _check_results_array(result, (numatom,))
        distances = np.asarray(result)
    else:
        distances = np.zeros((numatom,), np.float64)
    # Dispatch to the C implementation matching the periodicity of the box.
    if box is not None:
        if boxtype == 'ortho':
            _run("calc_bond_distance_ortho",
                 args=(atom1, atom2, box, distances),
                 backend=backend)
        else:
            _run("calc_bond_distance_triclinic",
                 args=(atom1, atom2, box, distances),
                 backend=backend)
    else:
        _run("calc_bond_distance",
             args=(atom1, atom2, distances),
             backend=backend)
    return distances
def calc_angles(coords1, coords2, coords3, box=None, result=None, backend="serial"):
    """
    Calculates the angle formed between three atoms, over a list of coordinates.

    All *atom* inputs are lists of coordinates of equal length, with *atom2*
    representing the apex of the angle.

    If a 1D numpy array of dtype ``numpy.float64`` with ``len(atom1)`` elements is
    provided in *result* then this preallocated array is filled. This can speed
    up calculations.

    The optional argument ``box`` ensures that periodic boundaries are taken into account when
    constructing the connecting vectors between atoms, ie that the vector between atoms 1 & 2
    goes between coordinates in the same image::

        angles = calc_angles(coords1, coords2, coords3, [[box=None],result=angles])

    Parameters
    ----------
    coords1 : array
        Coordinate array of one side of angles.
    coords2 : array
        Coordinate array of apex of angles.
    coords3 : array
        Coordinate array of other side of angles.
    box : array
        The unitcell dimensions for this system.
        The dimensions must be provided in the same format as returned
        by :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`: ``[lx,
        ly, lz, alpha, beta, gamma]``.
    result : array, optional
        Preallocated result array which must be same length as coord
        arrays and ``dtype=numpy.float64``. Avoids creating the
        array which saves time when the function is called repeatedly. [None]
    backend
        Select the type of acceleration; "serial" is always available. Other
        possibilities are "OpenMP" (OpenMP).

    Returns
    -------
    angles : array
        A numpy.array of angles in radians.

    .. versionadded:: 0.8
    .. versionchanged:: 0.9.0
       Added optional box argument to account for periodic boundaries in calculation
    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    """
    # C-contiguous copies keep the C backends happy and leave inputs untouched.
    atom1 = coords1.copy('C')
    atom2 = coords2.copy('C')
    atom3 = coords3.copy('C')
    numatom = atom1.shape[0]
    _check_array(atom1, 'coords1')
    _check_array(atom2, 'coords2')
    _check_array(atom3, 'coords3')
    _check_lengths_match(atom1, atom2, atom3)
    if box is not None:
        boxtype = _box_check(box)
        # Convert [A,B,C,alpha,beta,gamma] to [[A],[B],[C]]
        if (boxtype == 'tri_box'):
            box = triclinic_vectors(box)
        if (boxtype == 'tri_vecs_bad'):
            box = triclinic_vectors(triclinic_box(box[0], box[1], box[2]))
    # Reuse the caller-supplied output buffer when given, otherwise allocate.
    if result is not None:
        _check_results_array(result, (numatom,))
        angles = np.asarray(result)
    else:
        angles = np.zeros((numatom,), np.float64)
    # Dispatch to the C implementation matching the periodicity of the box.
    if box is not None:
        if boxtype == 'ortho':
            _run("calc_angle_ortho",
                 args=(atom1, atom2, atom3, box, angles),
                 backend=backend)
        else:
            _run("calc_angle_triclinic",
                 args=(atom1, atom2, atom3, box, angles),
                 backend=backend)
    else:
        _run("calc_angle",
             args=(atom1, atom2, atom3, angles),
             backend=backend)
    return angles
def calc_dihedrals(coords1, coords2, coords3, coords4, box=None, result=None,
                   backend="serial"):
    """
    Calculate the dihedral angle formed by four atoms, over a list of coordinates.

    Dihedral angle around axis connecting atoms 1 and 2 (i.e. the angle
    between the planes spanned by atoms (0,1,2) and (1,2,3))::

                  3
                  |
            1-----2
           /
          0

    If a 1D numpy array of dtype ``numpy.float64`` with ``len(atom1)`` elements
    is provided in *result* then this preallocated array is filled. This can
    speed up calculations.

    The optional argument ``box`` ensures that periodic boundaries are taken
    into account when constructing the connecting vectors between atoms, ie
    that the vector between atoms 1 & 2 goes between coordinates in the same
    image::

        angles = calc_dihedrals(coords1, coords2, coords3, coords4 [,box=box, result=angles])

    Parameters
    ----------
    coords1 : array
        Coordinate array of 1st atom in dihedrals.
    coords2 : array
        Coordinate array of 2nd atom in dihedrals.
    coords3 : array
        Coordinate array of 3rd atom in dihedrals.
    coords4 : array
        Coordinate array of 4th atom in dihedrals.
    box : array
        The unitcell dimensions for this system.
        The dimensions must be provided in the same format as returned
        by :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`: ``[lx,
        ly, lz, alpha, beta, gamma]``.
    result : array, optional
        Preallocated result array which must be same length as coord
        arrays and ``dtype=numpy.float64``. Avoids creating the
        array which saves time when the function is called repeatedly. [None]
    backend
        Select the type of acceleration; "serial" is always available. Other
        possibilities are "OpenMP" (OpenMP).

    Returns
    -------
    angles : array
        A numpy.array of angles in radians.

    .. versionadded:: 0.8
    .. versionchanged:: 0.9.0
       Added optional box argument to account for periodic boundaries in calculation
    .. versionchanged:: 0.11.0
       Renamed from calc_torsions to calc_dihedrals
    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    """
    # C-contiguous copies keep the C backends happy and leave inputs untouched.
    atom1 = coords1.copy('C')
    atom2 = coords2.copy('C')
    atom3 = coords3.copy('C')
    atom4 = coords4.copy('C')
    _check_array(atom1, 'atom1')
    _check_array(atom2, 'atom2')
    _check_array(atom3, 'atom3')
    _check_array(atom4, 'atom4')
    _check_lengths_match(atom1, atom2, atom3, atom4)
    numatom = atom1.shape[0]
    if box is not None:
        boxtype = _box_check(box)
        # Convert [A,B,C,alpha,beta,gamma] to [[A],[B],[C]]
        if (boxtype == 'tri_box'):
            box = triclinic_vectors(box)
        if (boxtype == 'tri_vecs_bad'):
            box = triclinic_vectors(triclinic_box(box[0], box[1], box[2]))
    # Reuse the caller-supplied output buffer when given, otherwise allocate.
    if result is not None:
        _check_results_array(result, (numatom,))
        angles = np.asarray(result)
    else:
        angles = np.zeros((numatom,), np.float64)
    # Dispatch to the C implementation matching the periodicity of the box.
    if box is not None:
        if boxtype == 'ortho':
            _run("calc_dihedral_ortho",
                 args=(atom1, atom2, atom3, atom4, box, angles),
                 backend=backend)
        else:
            _run("calc_dihedral_triclinic",
                 args=(atom1, atom2, atom3, atom4, box, angles),
                 backend=backend)
    else:
        _run("calc_dihedral",
             args=(atom1, atom2, atom3, atom4, angles),
             backend=backend)
    return angles
def apply_PBC(incoords, box, backend="serial"):
    """Moves a set of coordinates to all be within the primary unit cell

        newcoords = apply_PBC(coords, box)

    Parameters
    ----------
    coords : array
        Coordinate array (of type numpy.float32).
    box : array
        The unitcell dimensions for this system; can be either orthogonal or
        triclinic information. The dimensions must be provided in the same
        format as returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`: ``[lx, ly, lz,
        alpha, beta, gamma]``.
    backend
        Select the type of acceleration; "serial" is always available. Other
        possibilities are "OpenMP" (OpenMP).

    Returns
    -------
    newcoords : array
        Coordinates that are now all within the primary unit cell, as defined
        by box.

    .. versionadded:: 0.8
    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    """
    # Work on a C-contiguous copy so the input array is never modified.
    coords = incoords.copy('C')
    _check_array(coords, 'coords')
    # NOTE(review): coordnum is computed but never used below.
    coordnum = coords.shape[0]
    # determine boxtype
    boxtype = _box_check(box)
    # Convert [A,B,C,alpha,beta,gamma] to [[A],[B],[C]]
    if (boxtype == 'tri_box'):
        box = triclinic_vectors(box)
    if (boxtype == 'tri_vecs_bad'):
        box = triclinic_vectors(triclinic_box(box[0], box[1], box[2]))
    # Precompute reciprocal box lengths so the C code can wrap coordinates
    # without per-atom divisions.
    box_inv = np.zeros((3), dtype=np.float32)
    if boxtype == 'ortho':
        box_inv[0] = 1.0 / box[0]
        box_inv[1] = 1.0 / box[1]
        box_inv[2] = 1.0 / box[2]
        _run("ortho_pbc",
             args=(coords, box, box_inv),
             backend=backend)
    else:
        # Triclinic box matrix: reciprocal lengths come from the diagonal.
        box_inv[0] = 1.0 / box[0][0]
        box_inv[1] = 1.0 / box[1][1]
        box_inv[2] = 1.0 / box[2][2]
        _run("triclinic_pbc",
             args=(coords, box, box_inv),
             backend=backend)
    return coords
| kain88-de/mdanalysis | package/MDAnalysis/lib/distances.py | Python | gpl-2.0 | 28,620 | [
"MDAnalysis"
] | bf063b785c1410fd5c445072fa5b6ce8e32f9e519922639a7c30b3ad7d3e050d |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RExperimenthub(RPackage):
    """Client to access ExperimentHub resources:

    This package provides a client for the Bioconductor ExperimentHub web
    resource. ExperimentHub provides a central location where curated data from
    experiments, publications or training courses can be accessed. Each
    resource has associated metadata, tags and date of modification. The client
    creates and manages a local cache of files retrieved enabling quick and
    reproducible access."""

    # Bioconductor package name used by Spack to locate the upstream sources.
    bioc = "ExperimentHub"

    version('1.16.1', commit='61d51b7ca968d6cc1befe299e0784d9a19ca51f6')

    # R package dependencies (build and run time); version constraints
    # presumably mirror the upstream DESCRIPTION file — TODO confirm.
    depends_on('r-biocgenerics@0.15.10:', type=('build', 'run'))
    depends_on('r-annotationhub@2.19.3:', type=('build', 'run'))
    depends_on('r-biocfilecache@1.5.1:', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-biocmanager', type=('build', 'run'))
    depends_on('r-curl', type=('build', 'run'))
    depends_on('r-rappdirs', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/r-experimenthub/package.py | Python | lgpl-2.1 | 1,221 | [
"Bioconductor"
] | e4c25490e9a715f7cb8972432331af7697f6052b540a0cce386ef33616547c99 |
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import collections
import io
from textwrap import dedent
from typing import Optional
import pytest
from tabledata import TableData
from tcolorpy import tcolor
import pytablewriter as ptw
from pytablewriter.style import Align, Cell, FontSize, Style, ThousandSeparator
from pytablewriter.typehint import Integer, RealNumber, String
from ..._common import print_test_result
from ...data import (
float_header_list,
float_value_matrix,
headers,
mix_header_list,
mix_value_matrix,
value_matrix,
value_matrix_iter,
value_matrix_iter_1,
value_matrix_with_none,
vut_style_tabledata,
vut_styles,
)
from ._common import regexp_ansi_escape, strip_ansi_escape
try:
import pandas as pd
SKIP_DATAFRAME_TEST = False
except ImportError:
SKIP_DATAFRAME_TEST = True
# Per-case fixture record for the write_table tests: table name, indent level,
# header row, value matrix, float-formatting flag, and expected Markdown text.
Data = collections.namedtuple("Data", "table indent header value is_formatting_float expected")
normal_test_data_list = [
Data(
table="",
indent=0,
header=headers,
value=value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.0|cccc|
"""
),
),
Data(
table="",
indent=0,
header=headers,
value=None,
is_formatting_float=True,
expected=dedent(
"""\
| a | b | c |dd | e |
|---|---|---|---|---|
"""
),
),
Data(
table="floating point",
indent=0,
header=headers,
value=[
["1", 123.09999999999999, "a", "1", 1],
[2, 2.2000000000000002, "bb", "2.2", 2.2000000000000002],
[3, 3.2999999999999998, "ccc", "3.2999999999999998", "cccc"],
],
is_formatting_float=True,
expected=dedent(
"""\
# floating point
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.3|cccc|
"""
),
),
Data(
table="tablename",
indent=1,
header=headers,
value=value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
## tablename
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.0|cccc|
"""
),
),
Data(
table="",
indent=0,
header=headers,
value=value_matrix_with_none,
is_formatting_float=True,
expected=dedent(
"""\
| a | b | c |dd | e |
|--:|--:|---|--:|----|
| 1| |a |1.0| |
| |2.2| |2.2| 2.2|
| 3|3.3|ccc| |cccc|
| | | | | |
"""
),
),
Data(
table="",
indent=0,
header=mix_header_list,
value=mix_value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
| i | f | c | if |ifc|bool| inf |nan|mix_num | time |
|--:|---:|----|---:|---|----|--------|---|-------:|-------------------------|
| 1|1.10|aa | 1.0| 1|X |Infinity|NaN| 1|2017-01-01T00:00:00 |
| 2|2.20|bbb | 2.2|2.2| |Infinity|NaN|Infinity|2017-01-02 03:04:05+09:00|
| 3|3.33|cccc|-3.0|ccc|X |Infinity|NaN| NaN|2017-01-01T00:00:00 |
"""
),
),
Data(
table="formatting float 1",
indent=0,
header=headers,
value=value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
# formatting float 1
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.0|cccc|
"""
),
),
Data(
table="formatting float 2",
indent=0,
header=float_header_list,
value=float_value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
# formatting float 2
| a | b | c |
|---:|-----------:|----:|
|0.01| 0.00125|0.000|
|1.00| 99.90000|0.010|
|1.20|999999.12300|0.001|
"""
),
),
Data(
table="not formatting float 1",
indent=0,
header=headers,
value=value_matrix,
is_formatting_float=False,
expected=dedent(
"""\
# not formatting float 1
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a | 1| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc| 3|cccc|
"""
),
),
Data(
table="not formatting float 2",
indent=0,
header=float_header_list,
value=float_value_matrix,
is_formatting_float=False,
expected=dedent(
"""\
# not formatting float 2
| a | b | c |
|---:|---------:|----:|
|0.01| 0.00125| 0|
| 1| 99.9| 0.01|
| 1.2|999999.123|0.001|
"""
),
),
Data(
table="",
indent=0,
header=["Name", "xUnit", "Source", "Remarks"],
value=[
[
"Crotest",
"",
"[160]",
"MIT License. A tiny and simple test framework for Crystal\nwith common assertions and no pollution into Object class.",
"",
]
],
is_formatting_float=True,
expected=dedent(
"""\
| Name |xUnit|Source| Remarks |
|-------|-----|------|--------------------------------------------------------------------------------------------------------------------|
|Crotest| |[160] |MIT License. A tiny and simple test framework for Crystal with common assertions and no pollution into Object class.|
"""
),
),
Data(
table="",
indent=0,
header=["姓", "名", "生年月日", "郵便番号", "住所", "電話番号"],
value=[
["山田", "太郎", "2001/1/1", "100-0002", "東京都千代田区皇居外苑", "03-1234-5678"],
["山田", "次郎", "2001/1/2", "251-0036", "神奈川県藤沢市江の島1丁目", "03-9999-9999"],
],
is_formatting_float=True,
expected=dedent(
"""\
| 姓 | 名 |生年月日|郵便番号| 住所 | 電話番号 |
|----|----|--------|--------|--------------------------|------------|
|山田|太郎|2001/1/1|100-0002|東京都千代田区皇居外苑 |03-1234-5678|
|山田|次郎|2001/1/2|251-0036|神奈川県藤沢市江の島1丁目|03-9999-9999|
"""
),
),
Data(
table="quoted values",
indent=0,
header=['"quote"', '"abc efg"'],
value=[
['"1"', '"abc"'],
['"-1"', '"efg"'],
],
is_formatting_float=True,
expected=dedent(
"""\
# quoted values
|quote|abc efg|
|----:|-------|
| 1|abc |
| -1|efg |
"""
),
),
Data(
table="not str headers",
indent=0,
header=[None, 1, 0.1],
value=[[None, 1, 0.1]],
is_formatting_float=True,
expected=dedent(
"""\
# not str headers
| | 1 |0.1|
|---|--:|--:|
| | 1|0.1|
"""
),
),
Data(
table="no uniform matrix",
indent=0,
header=["a", "b", "c"],
value=[["a", 0], ["b", 1, "bb"], ["c", 2, "ccc", 0.1]],
is_formatting_float=True,
expected=dedent(
"""\
# no uniform matrix
| a | b | c |
|---|--:|---|
|a | 0| |
|b | 1|bb |
|c | 2|ccc|
"""
),
),
Data(
table="line breaks",
indent=0,
header=["a\nb", "\nc\n\nd\n", "e\r\nf"],
value=[["v1\nv1", "v2\n\nv2", "v3\r\nv3"]],
is_formatting_float=True,
expected=dedent(
"""\
# line breaks
| a b | c d | e f |
|-----|------|-----|
|v1 v1|v2 v2|v3 v3|
"""
),
),
Data(
table="empty header",
indent=0,
header=[],
value=value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
# empty header
| A | B | C | D | E |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.0|cccc|
"""
),
),
Data(
table="vertical bar",
indent=1,
header=["a|b", "|c||d|"],
value=[["|v1|v1|", "v2|v2"]],
is_formatting_float=True,
expected=r"""## vertical bar
| a\|b |\|c\|\|d\||
|-------|------|
|\|v1\|v1\||v2\|v2 |
""",
),
Data(
table="mixed value types",
indent=0,
header=["data", "v"],
value=[
[3.4375, 65.5397978633],
[65.5397978633, 127.642095727],
[189.74439359, 189.74439359],
[10064.0097539, 10001.907456],
["next", 10250.3166474],
],
is_formatting_float=True,
expected=dedent(
"""\
# mixed value types
| data | v |
|----------------|---------------:|
| 3.4375000000| 65.5397978633|
| 65.5397978633| 127.6420957270|
| 189.7443935900| 189.7443935900|
|10064.0097539000|10001.9074560000|
|next |10250.3166474000|
"""
),
),
Data(
table="list of dict",
indent=0,
header=["A", "B", "C"],
value=[
{"A": 1},
{"B": 2.1, "C": "hoge"},
{"A": 0, "B": 0.1, "C": "foo"},
{},
{"A": -1, "B": -0.1, "C": "bar", "D": "extra"},
],
is_formatting_float=False,
expected=dedent(
"""\
# list of dict
| A | B | C |
|--:|---:|----|
| 1| | |
| | 2.1|hoge|
| 0| 0.1|foo |
| | | |
| -1|-0.1|bar |
"""
),
),
Data(
table="",
indent=0,
header=[],
value=[],
is_formatting_float=True,
expected="",
),
]
# Writer class under test; aliased so the test bodies read generically.
table_writer_class = ptw.MarkdownTableWriter
def trans_func(value):
    """Value-transform hook registered on the writer in these tests.

    ``None`` and ``False`` render as an empty string, ``True`` renders as
    ``"X"``, and every other value passes through unchanged. Identity
    checks (``is``) are deliberate so that ``1``/``0`` are not treated as
    booleans.
    """
    if value is None or value is False:
        return ""
    return "X" if value is True else value
class Test_MarkdownTableWriter_write_new_line:
    """Tests for MarkdownTableWriter.write_null_line."""

    def test_normal(self, capsys):
        # write_null_line must emit exactly one newline to stdout.
        writer = table_writer_class()
        writer.write_null_line()
        out, _err = capsys.readouterr()
        assert out == "\n"
class Test_MarkdownTableWriter_constructor:
    """Tests that writer options passed as constructor kwargs are honored."""

    def test_normal_kwargs(self):
        writer = table_writer_class(
            headers=["w/ strike", "w/ line through"],
            value_matrix=[["strike", "line-through"]],
            column_styles=[
                Style(decoration_line="strike"),
                Style(decoration_line="line-through"),
            ],
        )
        expected = dedent(
            """\
            |w/ strike|w/ line through|
            |---------|---------------|
            |strike |line-through |
            """
        )
        out = str(writer)
        print_test_result(expected=expected, actual=out)
        # Decorations render as ANSI escapes; compare after stripping them.
        assert regexp_ansi_escape.search(out)
        assert strip_ansi_escape(out) == expected
class Test_MarkdownTableWriter_repr:
    """Tests for str()/repr() rendering of a writer instance."""

    def test_normal_empty(self):
        # An unconfigured writer stringifies to (effectively) nothing.
        writer = table_writer_class()
        assert str(writer).strip() == ""

    def test_normal_ansi(self):
        # Styled output contains ANSI escapes but matches the plain table
        # once those escapes are stripped.
        writer = table_writer_class()
        writer.column_styles = [
            Style(decoration_line="strike"),
            Style(decoration_line="line-through"),
        ]
        writer.headers = ["w/ strike", "w/ line through"]
        writer.value_matrix = [["strike", "line-through"]]
        expected = dedent(
            """\
            |w/ strike|w/ line through|
            |---------|---------------|
            |strike |line-through |
            """
        )
        out = str(writer)
        print_test_result(expected=expected, actual=out)
        assert regexp_ansi_escape.search(out)
        assert strip_ansi_escape(out) == expected
class Test_MarkdownTableWriter_table_format:
    """Sanity check that the writer reports the Markdown table format."""

    def test_normal(self):
        assert table_writer_class().table_format is ptw.TableFormat.MARKDOWN
class Test_MarkdownTableWriter_write_table:
    # Drive every fixture case in normal_test_data_list through the writer.
    @pytest.mark.parametrize(
        ["table", "indent", "header", "value", "is_formatting_float", "expected"],
        [
            [
                data.table,
                data.indent,
                data.header,
                data.value,
                data.is_formatting_float,
                data.expected,
            ]
            for data in normal_test_data_list
        ],
    )
    def test_normal(self, capsys, table, indent, header, value, is_formatting_float, expected):
        writer = table_writer_class(
            table_name=table,
            headers=header,
            value_matrix=value,
            is_formatting_float=is_formatting_float,
        )
        writer.set_indent_level(indent)
        writer.register_trans_func(trans_func)
        writer.write_table()
        out, err = capsys.readouterr()
        print_test_result(expected=expected, actual=out, error=err)
        # write_table(), dumps() and str() must all produce the same output.
        assert out == expected
        assert writer.dumps() == expected
        assert str(writer) == expected
        # Passing indent_level via the constructor must be equivalent to
        # calling set_indent_level() afterwards.
        writer = table_writer_class(
            table_name=table,
            headers=header,
            value_matrix=value,
            indent_level=indent,
            is_formatting_float=is_formatting_float,
        )
        writer.register_trans_func(trans_func)
        assert writer.dumps() == expected
    def test_normal_single_tabledata(self, capsys):
        # Loading a TableData via from_tabledata() must render it verbatim.
        writer = table_writer_class()
        writer.from_tabledata(
            TableData(
                "loader_mapping",
                ["Name", "Loader"],
                [
                    ["csv", "CsvTableFileLoader"],
                    ["excel", "ExcelTableFileLoader"],
                    ["html", "HtmlTableFileLoader"],
                    ["markdown", "MarkdownTableFileLoader"],
                    ["mediawiki", "MediaWikiTableFileLoader"],
                    ["json", "JsonTableFileLoader"],
                    ["Long Format Name", "Loader"],
                ],
            )
        )
        writer.write_table()
        expected = dedent(
            """\
            # loader_mapping
            | Name | Loader |
            |----------------|------------------------|
            |csv |CsvTableFileLoader |
            |excel |ExcelTableFileLoader |
            |html |HtmlTableFileLoader |
            |markdown |MarkdownTableFileLoader |
            |mediawiki |MediaWikiTableFileLoader|
            |json |JsonTableFileLoader |
            |Long Format Name|Loader |
            """
        )
        out, err = capsys.readouterr()
        print_test_result(expected=expected, actual=out, error=err)
        assert out == expected
    def test_normal_multiple_write(self, capsys):
        # One writer instance can be reloaded and write several tables;
        # is_write_null_line_after_table separates them.
        writer = table_writer_class(is_write_null_line_after_table=True)
        writer.from_tabledata(
            TableData(
                "first",
                ["Name", "Loader"],
                [
                    ["csv", "CsvTableFileLoader"],
                    ["excel", "ExcelTableFileLoader"],
                ],
            )
        )
        writer.write_table()
        writer.from_tabledata(
            TableData(
                "second",
                ["a", "b", "c"],
                [
                    ["1", "AA", "abc"],
                    ["2", "BB", "zzz"],
                ],
            )
        )
        writer.write_table()
        expected = dedent(
            """\
            # first
            |Name | Loader |
            |-----|--------------------|
            |csv |CsvTableFileLoader |
            |excel|ExcelTableFileLoader|
            # second
            | a | b | c |
            |--:|---|---|
            | 1|AA |abc|
            | 2|BB |zzz|
            """
        )
        out, err = capsys.readouterr()
        print_test_result(expected=expected, actual=out, error=err)
        assert out == expected
    def test_normal_type_hints(self):
        # Column type hints change rendering; string aliases ("string",
        # "float") must behave identically to the hint classes.
        writer = table_writer_class(
            table_name="type hints",
            headers=["a", "b"],
            value_matrix=[
                [0, 1],
                [11, 12],
            ],
        )
        expected = dedent(
            """\
            # type hints
            | a | b |
            |--:|--:|
            | 0| 1|
            | 11| 12|
            """
        )
        out_wo_type_hints = writer.dumps()
        print_test_result(expected=expected, actual=out_wo_type_hints)
        writer.type_hints = [String, RealNumber]
        out_w_type_hints = writer.dumps()
        assert writer.type_hints == [String, RealNumber]
        assert out_wo_type_hints != out_w_type_hints
        writer.type_hints = ["string", "float"]
        assert writer.type_hints == [String, RealNumber]
        assert out_w_type_hints == writer.dumps()
    def test_normal_style_align(self):
        # Default alignment is inferred per column; explicit column_styles
        # then override it via the Markdown separator row.
        writer = table_writer_class()
        writer.from_tabledata(
            TableData(
                "auto align",
                ["left", "right", "center", "auto", "auto", "None"],
                [
                    [0, "r", "center align", 0, "a", "n"],
                    [11, "right align", "bb", 11, "auto", "none (auto)"],
                ],
            )
        )
        expected = dedent(
            """\
            # auto align
            |left| right | center |auto|auto| None |
            |---:|-----------|------------|---:|----|-----------|
            | 0|r |center align| 0|a |n |
            | 11|right align|bb | 11|auto|none (auto)|
            """
        )
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert out == expected
        writer.table_name = "specify alignment for each column manually"
        writer.column_styles = [
            Style(align=Align.LEFT),
            Style(align=Align.RIGHT),
            Style(align=Align.CENTER),
            Style(align=Align.AUTO),
            Style(align=Align.AUTO),
            None,
        ]
        expected = dedent(
            """\
            # specify alignment for each column manually
            |left| right | center |auto|auto| None |
            |----|----------:|:----------:|---:|----|-----------|
            |0 | r|center align| 0|a |n |
            |11 |right align| bb | 11|auto|none (auto)|
            """
        )
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert out == expected
    def test_normal_style_thousand_separator(self, capsys):
        # Each ThousandSeparator style renders numbers differently; columns
        # without a style fall back to plain formatting.
        writer = table_writer_class()
        writer.from_tabledata(
            TableData(
                "",
                ["none_format", "thousand_separator_i", "thousand_separator_f", "f", "wo_f"],
                [
                    [1000, 1234567, 1234567.8, 1234.5678, 1234567.8],
                    [1000, 1234567, 1234567.8, 1234.5678, 1234567.8],
                ],
            )
        )
        writer.column_styles = [
            Style(thousand_separator=ThousandSeparator.NONE),
            Style(thousand_separator=ThousandSeparator.COMMA),
            Style(thousand_separator=ThousandSeparator.UNDERSCORE),
            Style(thousand_separator=ThousandSeparator.SPACE),
        ]
        out = writer.dumps()
        expected = dedent(
            """\
            |none_format|thousand_separator_i|thousand_separator_f| f | wo_f |
            |----------:|-------------------:|-------------------:|---------:|--------:|
            | 1000| 1,234,567| 1_234_567.8|1 234.5678|1234567.8|
            | 1000| 1,234,567| 1_234_567.8|1 234.5678|1234567.8|
            """
        )
        print_test_result(expected=expected, actual=out)
        assert out == expected
    def test_normal_dequote(self):
        # With dequote=True, values that are *entirely* wrapped in matching
        # quotes are unwrapped; embedded quotes are left alone. dequote=False
        # keeps everything verbatim.
        headers = ["id", "name"]
        values = [
            ["1", 'Debian 11 "Bullseye"'],
            ["2", "Debian 11 'Bullseye'"],
            ["3", '"abc" efg'],
            ["4", "'abc' efg"],
            ["5", '"abc" "efg"'],
            ["6", "'abc' 'efg'"],
            ["7", "'abc'"],
            ["8", '"abc"'],
            ["9", {"1": 1}],
            ["10", {"1"}],
        ]
        writer = table_writer_class(
            table_name="zone",
            headers=headers,
            value_matrix=values,
            dequote=True,
            margin=0,
        )
        expected = dedent(
            """\
            # zone
            |id | name |
            |--:|--------------------|
            | 1|Debian 11 "Bullseye"|
            | 2|Debian 11 'Bullseye'|
            | 3|"abc" efg |
            | 4|'abc' efg |
            | 5|"abc" "efg" |
            | 6|'abc' 'efg' |
            | 7|abc |
            | 8|abc |
            | 9|{'1': 1} |
            | 10|{'1'} |
            """
        )
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert out == expected
        writer = table_writer_class(
            table_name="zone",
            headers=headers,
            value_matrix=values,
            dequote=False,
            margin=0,
        )
        expected = dedent(
            """\
            # zone
            |id | name |
            |--:|--------------------|
            | 1|Debian 11 "Bullseye"|
            | 2|Debian 11 'Bullseye'|
            | 3|"abc" efg |
            | 4|'abc' efg |
            | 5|"abc" "efg" |
            | 6|'abc' 'efg' |
            | 7|'abc' |
            | 8|"abc" |
            | 9|{'1': 1} |
            | 10|{'1'} |
            """
        )
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert out == expected
    def test_normal_style_font_size(self):
        # Markdown has no font-size concept, so these styles must be no-ops.
        writer = table_writer_class()
        writer.table_name = "style test: font size will not be affected"
        writer.headers = ["none", "empty_style", "tiny", "small", "medium", "large"]
        writer.value_matrix = [
            [111, 111, 111, 111, 111, 111],
            [1234, 1234, 1234, 1234, 1234, 1234],
        ]
        writer.column_styles = [
            None,
            Style(),
            Style(font_size=FontSize.TINY),
            Style(font_size=FontSize.SMALL),
            Style(font_size=FontSize.MEDIUM),
            Style(font_size=FontSize.LARGE),
        ]
        expected = dedent(
            """\
            # style test: font size will not be affected
            |none|empty_style|tiny|small|medium|large|
            |---:|----------:|---:|----:|-----:|----:|
            | 111| 111| 111| 111| 111| 111|
            |1234| 1234|1234| 1234| 1234| 1234|
            """
        )
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert out == expected
    def test_normal_style_font_weight(self):
        # font_weight="bold" must render values wrapped in ** markers
        # (alongside ANSI escapes, which are stripped for comparison).
        writer = table_writer_class()
        writer.table_name = "style test: bold"
        writer.headers = ["normal", "bold"]
        writer.value_matrix = [
            [11, 11],
            [123456, 123456],
        ]
        writer.column_styles = [Style(font_weight="normal"), Style(font_weight="bold")]
        expected = dedent(
            """\
            # style test: bold
            |normal| bold |
            |-----:|---------:|
            | 11| **11**|
            |123456|**123456**|
            """
        )
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert regexp_ansi_escape.search(out)
        assert strip_ansi_escape(out) == expected
    def test_normal_style_mix(self):
        # Shared fixture covering a mix of styles (sizes, separators,
        # bold/italic combinations) in one table.
        writer = table_writer_class(column_styles=vut_styles)
        writer.from_tabledata(vut_style_tabledata)
        expected = dedent(
            """\
            # style test
            |none|empty|tiny|small|medium|large|null w/ bold| L bold |S italic|L bold italic|
            |---:|----:|---:|----:|-----:|----:|------------|-------:|-------:|------------:|
            | 111| 111| 111| 111| 111| 111| | **111**| _111_| _**111**_|
            |1234| 1234|1234| 1234| 1,234|1 234| |**1234**| _1234_| _**1234**_|
            """
        )
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert regexp_ansi_escape.search(out)
        assert strip_ansi_escape(out) == expected
    def test_normal_style_filter(self):
        # A style filter computes a per-cell Style; it must override
        # column_styles, and removing it must restore the original output.
        def style_filter(cell: Cell, **kwargs) -> Optional[Style]:
            # int cells -> left, "c" -> center, "r" -> right, otherwise defer.
            if isinstance(cell.value, int):
                return Style(align="left")
            if cell.value == "c":
                return Style(align="center")
            if cell.value == "r":
                return Style(align="right")
            return None

        writer = table_writer_class(
            table_name="style filter",
            headers=["left", "center", "right", "overwrite l", "overwrite c", "overwrite r"],
            value_matrix=[
                [1, "c", "r", 1, "c", "r"],
                [2.2, "left", "left", 2.2, "right", "center"],
            ],
            margin=1,
            column_styles=[
                None,
                None,
                None,
                Style(align="center"),
                Style(align="right"),
                Style(align="center"),
            ],
        )
        output_wo_theme = writer.dumps()
        writer.add_style_filter(style_filter)
        expected = dedent(
            """\
            # style filter
            | left | center | right | overwrite l | overwrite c | overwrite r |
            | ---: | ------ | ----- | :---------: | ----------: | :---------: |
            | 1.0 | c | r | 1.0 | c | r |
            | 2.2 | left | left | 2.2 | right | center |
            """
        )
        output_w_theme = writer.dumps()
        print_test_result(expected=expected, actual=output_w_theme)
        assert output_w_theme != output_wo_theme
        assert output_w_theme == expected
        # A fresh, identically-configured writer without the filter must
        # reproduce the unfiltered output.
        assert (
            table_writer_class(
                table_name="style filter",
                headers=["left", "center", "right", "overwrite l", "overwrite c", "overwrite r"],
                value_matrix=[
                    [1, "c", "r", 1, "c", "r"],
                    [2.2, "left", "left", 2.2, "right", "center"],
                ],
                margin=1,
                column_styles=[
                    None,
                    None,
                    None,
                    Style(align="center"),
                    Style(align="right"),
                    Style(align="center"),
                ],
            ).dumps()
            == output_wo_theme
        )
def test_normal_clear_theme(self):
writer = table_writer_class()
writer.table_name = "style test: bold"
writer.headers = ["normal", "bold"]
writer.value_matrix = [[11, 11], [123456, 123456]]
out_wo_theme = writer.dumps()
writer.set_theme("altrow")
out_w_theme = writer.dumps()
assert out_w_theme != out_wo_theme
# set theme at constructor
writer = table_writer_class(
table_name="style test: bold",
headers=["normal", "bold"],
value_matrix=[[11, 11], [123456, 123456]],
theme="altrow",
)
assert writer.dumps() == out_w_theme
writer.clear_theme()
out_wo_theme = writer.dumps()
assert out_w_theme != out_wo_theme
assert regexp_ansi_escape.search(out_wo_theme) is None
def test_except_set_theme(self):
writer = table_writer_class()
with pytest.warns(UserWarning):
writer.set_theme("not_existing_theme")
def test_normal_set_style(self):
writer = table_writer_class(
table_name="set style method",
headers=["normal", "style by idx", "style by header"],
value_matrix=[
[11, 11, 11],
[123456, 123456, 123456],
],
)
writer.set_style(1, Style(font_weight="bold", thousand_separator=","))
writer.set_style(
"style by header", Style(align="center", font_weight="bold", thousand_separator=" ")
)
expected = dedent(
"""\
# set style method
|normal|style by idx|style by header|
|-----:|-----------:|:-------------:|
| 11| **11**| **11** |
|123456| **123,456**| **123 456** |
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert regexp_ansi_escape.search(output)
assert strip_ansi_escape(output) == expected
writer.table_name = "change style"
writer.set_style(1, Style(align="right", font_style="italic"))
writer.set_style("style by header", Style())
expected = dedent(
"""\
# change style
|normal|style by idx|style by header|
|-----:|-----------:|--------------:|
| 11| _11_| 11|
|123456| _123456_| 123456|
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert strip_ansi_escape(output) == expected
def test_normal_ansi_color(self, capsys):
writer = table_writer_class()
writer.table_name = "ANCI escape sequence"
writer.headers = ["colored_i", "colored_f", "colored_s", "wo_anci"]
writer.value_matrix = [
[
tcolor("111", color="red"),
tcolor("1.1", color="green"),
tcolor("abc", color="blue"),
"abc",
],
[
tcolor("0", color="red"),
tcolor("0.12", color="green"),
tcolor("abcdef", color="blue"),
"abcdef",
],
]
writer.write_table()
expected = dedent(
"""\
# ANCI escape sequence
|colored_i|colored_f|colored_s|wo_anci|
|--------:|--------:|---------|-------|
| 111| 1.1|abc |abc |
| 0| 0.12|abcdef |abcdef |
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert regexp_ansi_escape.search(out)
assert strip_ansi_escape(out) == expected
def test_normal_ansi_style(self):
writer = table_writer_class(
headers=["w/ strike", "w/ line through"],
value_matrix=[["strike", "line-through"]],
column_styles=[
Style(decoration_line="strike"),
Style(decoration_line="line-through"),
],
)
expected = dedent(
"""\
|w/ strike|w/ line through|
|---------|---------------|
|strike |line-through |
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert regexp_ansi_escape.search(out)
assert strip_ansi_escape(out) == expected
def test_normal_colorize_terminal(self):
column_styles = [
Style(color="red"),
Style(bg_color="white"),
]
writer = table_writer_class(
column_styles=column_styles,
headers=["fg color", "bg color"],
value_matrix=[["hoge", "foo"]],
colorize_terminal=True,
)
out = writer.dumps()
assert regexp_ansi_escape.search(out)
assert (
table_writer_class(
headers=["fg color", "bg color"],
value_matrix=[["hoge", "foo"]],
column_styles=column_styles,
colorize_terminal=True,
).dumps()
== out
)
writer.colorize_terminal = False
out = writer.dumps()
assert regexp_ansi_escape.search(out) is None
assert (
table_writer_class(
headers=["fg color", "bg color"],
value_matrix=[["hoge", "foo"]],
column_styles=column_styles,
colorize_terminal=False,
).dumps()
== out
)
def test_normal_enable_ansi_escape(self):
writer = table_writer_class(
column_styles=[
Style(font_weight="bold"),
Style(decoration_line="line-through"),
],
headers=["w/ bold", "w/ line through"],
value_matrix=[["hoge", "foo"]],
colorize_terminal=True,
enable_ansi_escape=True,
)
out = writer.dumps()
assert regexp_ansi_escape.search(out)
writer.enable_ansi_escape = False
out = writer.dumps()
assert regexp_ansi_escape.search(out) is None
writer.colorize_terminal = False
writer.enable_ansi_escape = True
out = writer.dumps()
assert regexp_ansi_escape.search(out)
def test_normal_margin_1(self, capsys):
writer = table_writer_class(margin=1)
writer.from_tabledata(TableData("", headers, value_matrix))
writer.write_table()
expected = dedent(
"""\
| a | b | c | dd | e |
| --: | ----: | --- | --: | ---- |
| 1 | 123.1 | a | 1.0 | 1 |
| 2 | 2.2 | bb | 2.2 | 2.2 |
| 3 | 3.3 | ccc | 3.0 | cccc |
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_margin_2(self, capsys):
writer = table_writer_class(margin=2)
writer.from_tabledata(TableData("", headers, value_matrix))
writer.write_table()
expected = dedent(
"""\
| a | b | c | dd | e |
| --: | ----: | --- | --: | ---- |
| 1 | 123.1 | a | 1.0 | 1 |
| 2 | 2.2 | bb | 2.2 | 2.2 |
| 3 | 3.3 | ccc | 3.0 | cccc |
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_register_trans_func(self):
writer = table_writer_class(
headers=["a", "b"],
value_matrix=[
["foo", True],
["bar", False],
],
)
writer.register_trans_func(trans_func)
expected = dedent(
"""\
| a | b |
|---|---|
|foo|X |
|bar| |
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert output == expected
def test_normal_flavor_commonmark(self):
writer = table_writer_class(
enable_ansi_escape=False,
column_styles=[
None,
Style(decoration_line="strike"),
Style(decoration_line="line-through"),
],
headers=["w/o style", "w/ strike", "w/ line through"],
value_matrix=[["no", "strike", "line-through"]],
flavor="CommonMark",
)
expected = dedent(
"""\
|w/o style|w/ strike|w/ line through|
|---------|---------|---------------|
|no |strike |line-through |
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert output == expected
def test_normal_flavor_gfm(self):
writer = table_writer_class(
enable_ansi_escape=False,
column_styles=[
None,
Style(decoration_line="strike"),
Style(decoration_line="line-through"),
],
headers=["w/o style", "w/ strike", "w/ line through"],
value_matrix=[["no", "strike", "line-through"]],
flavor="gfm",
)
expected = dedent(
"""\
|w/o style|w/ strike |w/ line through |
|---------|----------|----------------|
|no |~~strike~~|~~line-through~~|
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert output == expected
assert output != writer.dumps(flavor="CommonMark")
def test_normal_flavor_kramdown(self):
writer = table_writer_class(
table_name="kramdown/Jeklly",
enable_ansi_escape=False,
column_styles=[
None,
Style(decoration_line="strike"),
Style(decoration_line="line-through"),
],
headers=["w/o style", "w/ strike", "w/ line through"],
value_matrix=[["no", "strike", "line-through"]],
flavor="kramdown",
)
expected = dedent(
"""\
# kramdown/Jeklly
|w/o style|w/ strike|w/ line through|
|---------|---------|---------------|
|no |strike |line-through |
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert output == expected
assert output != writer.dumps(flavor="gfm")
def test_normal_avoid_overwrite_stream_by_dumps(self):
writer = table_writer_class(headers=["a", "b"], value_matrix=[["foo", "bar"]])
writer.stream = io.StringIO()
expected = dedent(
"""\
| a | b |
|---|---|
|foo|bar|
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert output == expected
print("--------------------")
writer.write_table()
output = writer.stream.getvalue()
print_test_result(expected=expected, actual=output)
assert output == expected
def test_normal_escape_html_tag(self, capsys):
writer = table_writer_class(
headers=["no", "text"],
value_matrix=[[1, "<caption>Table 'formatting for Jupyter Notebook.</caption>"]],
)
writer.update_preprocessor(is_escape_html_tag=True)
writer.write_table()
expected = dedent(
"""\
|no | text |
|--:|---------------------------------------------------------------------------|
| 1|<caption>Table 'formatting for Jupyter Notebook.</caption>|
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_escape_html_tag_from_tabledata(self, capsys):
writer = table_writer_class()
writer.from_tabledata(
TableData(
"",
["no", "text"],
[[1, "<caption>Table 'formatting for Jupyter Notebook.</caption>"]],
)
)
writer.update_preprocessor(is_escape_html_tag=True)
writer.write_table()
expected = dedent(
"""\
|no | text |
|--:|---------------------------------------------------------------------------|
| 1|<caption>Table 'formatting for Jupyter Notebook.</caption>|
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
class Test_MarkdownTableWriter_write_table_iter:
    """Tests for incremental (chunked) table rendering via write_table_iter."""

    @pytest.mark.parametrize(
        ["table", "header", "value", "expected"],
        [
            [
                "tablename",
                ["ha", "hb", "hc"],
                value_matrix_iter,
                dedent(
                    """\
                    # tablename
                    | ha | hb | hc |
                    |---:|---:|---:|
                    |   1|   2|   3|
                    |  11|  12|  13|
                    |   1|   2|   3|
                    |  11|  12|  13|
                    | 101| 102| 103|
                    |1001|1002|1003|
                    """
                ),
            ],
            [
                "mix length",
                ["string", "hb", "hc"],
                value_matrix_iter_1,
                dedent(
                    """\
                    # mix length
                    |           string            | hb  | hc |
                    |-----------------------------|----:|---:|
                    |a b c d e f g h i jklmn      |  2.1|   3|
                    |aaaaa                        | 12.1|  13|
                    |bbb                          |    2|   3|
                    |cc                           |   12|  13|
                    |a                            |  102| 103|
                    |                             | 1002|1003|
                    """
                ),
            ],
        ],
    )
    def test_normal(self, capsys, table, header, value, expected):
        # iteration_length is set to the number of chunks `value` yields so
        # the writer can render the whole table progressively
        writer = table_writer_class(
            table_name=table, headers=header, value_matrix=value, iteration_length=len(value)
        )
        writer.write_table_iter()
        out, err = capsys.readouterr()
        print_test_result(expected=expected, actual=out, error=err)
        assert out == expected
class Test_MarkdownTableWriter_dump:
    def test_normal(self, tmpdir):
        """dump() writes the rendered table to a file without ANSI styling."""
        out_path = str(tmpdir.join("test.sqlite"))
        writer = table_writer_class(
            headers=["a", "b"],
            value_matrix=[["foo", "bar"]],
            column_styles=[
                Style(color="red"),
                Style(bg_color="white"),
            ],
        )
        writer.dump(out_path)

        expected = dedent(
            """\
            | a | b |
            |---|---|
            |foo|bar|
            """
        )
        with open(out_path) as f:
            written = f.read()
        print_test_result(expected=expected, actual=written)
        assert written == expected
class Test_MarkdownTableWriter_from_writer:
    def test_normal(self):
        """from_writer() must copy enough state to reproduce the output."""
        source = table_writer_class()
        source.from_tabledata(
            TableData(
                "loader_mapping",
                ["Name", "Loader"],
                [
                    ["csv", "CsvTableFileLoader"],
                    ["excel", "ExcelTableFileLoader"],
                    ["html", "HtmlTableFileLoader"],
                    ["markdown", "MarkdownTableFileLoader"],
                    ["mediawiki", "MediaWikiTableFileLoader"],
                    ["json", "JsonTableFileLoader"],
                    ["Long Format Name", "Loader"],
                ],
            )
        )
        original_render = source.dumps()

        clone = table_writer_class()
        clone.from_writer(source)
        copied_render = clone.dumps()

        print_test_result(expected=copied_render, actual=original_render)
        assert copied_render == original_render
class Test_MarkdownTableWriter_from_tablib:
    def test_normal_multiple_write(self, capsys):
        """from_tablib() loads headers and rows from a tablib.Dataset."""
        try:
            import tablib
        except ImportError:
            pytest.skip("requires tablib")

        dataset = tablib.Dataset()
        dataset.headers = ["a", "b", "c"]
        dataset.append(["1", "AA", "abc"])
        dataset.append(["2", "BB", "zzz"])

        writer = table_writer_class()
        writer.from_tablib(dataset)
        writer.write_table()

        expected = dedent(
            """\
            | a | b | c |
            |--:|---|---|
            |  1|AA |abc|
            |  2|BB |zzz|
            """
        )
        out, err = capsys.readouterr()
        print_test_result(expected=expected, actual=out, error=err)
        assert out == expected
class Test_MarkdownTableWriter_line_break_handling:
    """Tests for how embedded newlines in cell values are rendered."""

    @pytest.mark.parametrize(
        ["value", "expected"],
        [
            [
                ptw.LineBreakHandling.REPLACE,
                dedent(
                    """\
                    |no |    text    |
                    |--:|------------|
                    |  1|first second|
                    """
                ),
            ],
            [
                ptw.LineBreakHandling.ESCAPE,
                r"""|no |    text     |
|--:|-------------|
|  1|first\nsecond|
""",
            ],
            [
                "escape",
                r"""|no |    text     |
|--:|-------------|
|  1|first\nsecond|
""",
            ],
        ],
    )
    def test_normal_line(self, value, expected):
        # value may be either the LineBreakHandling enum or its string name
        writer = table_writer_class(headers=["no", "text"], value_matrix=[[1, "first\nsecond"]])
        writer.update_preprocessor(line_break_handling=value)
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert out == expected
@pytest.mark.skipif(SKIP_DATAFRAME_TEST, reason="required package not found")
class Test_MarkdownTableWriter_from_dataframe:
    """Tests for loading table data from a pandas.DataFrame."""

    @pytest.mark.parametrize(
        ["add_index_column", "expected"],
        [
            [
                False,
                dedent(
                    """\
                    # add_index_column: False
                    | A | B |
                    |--:|--:|
                    |  1| 10|
                    |  2| 11|
                    """
                ),
            ],
            [
                True,
                dedent(
                    """\
                    # add_index_column: True
                    |   | A | B |
                    |---|--:|--:|
                    |a  |  1| 10|
                    |b  |  2| 11|
                    """
                ),
            ],
        ],
    )
    def test_normal(self, tmpdir, add_index_column, expected):
        writer = table_writer_class(table_name=f"add_index_column: {add_index_column}")
        df = pd.DataFrame({"A": [1, 2], "B": [10, 11]}, index=["a", "b"])
        writer.from_dataframe(df, add_index_column=add_index_column)
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert out == expected

        # pickle test
        # from_dataframe also accepts a path to a pickled DataFrame and must
        # produce the same output as the in-memory object
        df_pkl_filepath = str(tmpdir.join("df.pkl"))
        df.to_pickle(df_pkl_filepath)
        writer.from_dataframe(df_pkl_filepath, add_index_column=add_index_column)
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert out == expected

    def test_normal_overwrite_type_hints(self):
        # flipping overwrite_type_hints must change the rendering; the test
        # only pins that the two outputs differ
        writer = table_writer_class(table_name="overwrite_type_hints", type_hints=[Integer])
        df = pd.DataFrame({"A": [1.1, 2.2], "B": [10.1, 11.2]}, index=["a", "b"])
        writer.from_dataframe(df, overwrite_type_hints=False)
        not_overwrite = writer.dumps()
        writer.from_dataframe(df, overwrite_type_hints=True)
        overwrite = writer.dumps()
        assert not_overwrite != overwrite
@pytest.mark.skipif(SKIP_DATAFRAME_TEST, reason="required package not found")
class Test_MarkdownTableWriter_from_series:
    """Tests for loading table data from a pandas.Series."""

    @pytest.mark.parametrize(
        ["add_index_column", "expected"],
        [
            [
                False,
                dedent(
                    """\
                    # add_index_column: False
                    |        value        |
                    | ------------------: |
                    | 100.000000000000000 |
                    |  49.500000000000000 |
                    |  29.011491975882016 |
                    |   0.000000000000000 |
                    |  24.750000000000000 |
                    |  49.500000000000000 |
                    |  74.250000000000000 |
                    |  99.000000000000000 |
                    """
                ),
            ],
            [
                True,
                dedent(
                    """\
                    # add_index_column: True
                    |       |        value        |
                    | ----- | ------------------: |
                    | count | 100.000000000000000 |
                    | mean  |  49.500000000000000 |
                    | std   |  29.011491975882016 |
                    | min   |   0.000000000000000 |
                    | 25%   |  24.750000000000000 |
                    | 50%   |  49.500000000000000 |
                    | 75%   |  74.250000000000000 |
                    | max   |  99.000000000000000 |
                    """
                ),
            ],
        ],
    )
    def test_normal(self, add_index_column, expected):
        # the Series under test is describe() of 0..99, so the expected
        # values are its count/mean/std/min/quartiles/max
        writer = table_writer_class(table_name=f"add_index_column: {add_index_column}", margin=1)
        writer.from_series(
            pd.Series(list(range(100))).describe(), add_index_column=add_index_column
        )
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert out == expected
| thombashi/pytablewriter | test/writer/text/test_markdown_writer.py | Python | mit | 51,368 | [
"CRYSTAL"
] | 9b047b4a960c9ee0d41a39dbeeebfe719e572651c3f89805f96a3085c86c6d3d |
r"""
Gradient analyses (:mod:`skbio.stats.gradient`)
===============================================
.. currentmodule:: skbio.stats.gradient
This module provides functionality for performing gradient analyses.
The algorithms included in this module mainly allow performing analyses of
volatility on time series data, but they can be applied to any data that
contains a gradient.
Classes
-------
.. autosummary::
:toctree:
GradientANOVA
AverageGradientANOVA
TrajectoryGradientANOVA
FirstDifferenceGradientANOVA
WindowDifferenceGradientANOVA
GroupResults
CategoryResults
GradientANOVAResults
Examples
--------
Assume we have the following coordinates:
>>> import numpy as np
>>> import pandas as pd
>>> from skbio.stats.gradient import AverageGradientANOVA
>>> coord_data = {'PC.354': np.array([0.2761, -0.0341, 0.0633, 0.1004]),
... 'PC.355': np.array([0.2364, 0.2186, -0.0301, -0.0225]),
... 'PC.356': np.array([0.2208, 0.0874, -0.3519, -0.0031]),
... 'PC.607': np.array([-0.1055, -0.4140, -0.15, -0.116]),
... 'PC.634': np.array([-0.3716, 0.1154, 0.0721, 0.0898])}
>>> coords = pd.DataFrame.from_dict(coord_data, orient='index')
the following metadata map:
>>> metadata_map = {'PC.354': {'Treatment': 'Control', 'Weight': '60'},
... 'PC.355': {'Treatment': 'Control', 'Weight': '55'},
... 'PC.356': {'Treatment': 'Control', 'Weight': '50'},
... 'PC.607': {'Treatment': 'Fast', 'Weight': '65'},
... 'PC.634': {'Treatment': 'Fast', 'Weight': '68'}}
>>> metadata_map = pd.DataFrame.from_dict(metadata_map, orient='index')
and the following array with the proportion explained of each coord:
>>> prop_expl = np.array([25.6216, 15.7715, 14.1215, 11.6913, 9.8304])
Then to compute the average trajectory of this data:
>>> av = AverageGradientANOVA(coords, prop_expl, metadata_map,
... trajectory_categories=['Treatment'],
... sort_category='Weight')
>>> trajectory_results = av.get_trajectories()
Check the algorithm used to compute the trajectory_results:
>>> print(trajectory_results.algorithm)
avg
Check if we weighted the data or not:
>>> print(trajectory_results.weighted)
False
Check the results of one of the categories:
>>> print(trajectory_results.categories[0].category)
Treatment
>>> print(trajectory_results.categories[0].probability)
0.0118478282382
Check the results of one group of one of the categories:
>>> print(trajectory_results.categories[0].groups[0].name)
Control
>>> print(trajectory_results.categories[0].groups[0].trajectory)
[ 3.52199973 2.29597001 3.20309816]
>>> print(trajectory_results.categories[0].groups[0].info)
{'avg': 3.007022633956606}
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from copy import deepcopy
from collections import defaultdict
from numbers import Integral
import numpy as np
from natsort import realsorted
from scipy.stats import f_oneway
from skbio.util._decorator import experimental
def _weight_by_vector(trajectories, w_vector):
r"""weights the values of `trajectories` given a weighting vector
`w_vector`.
Each value in `trajectories` will be weighted by the 'rate of change'
to 'optimal rate of change' ratio. The 'rate of change' of a vector
measures how each point in the vector changes with respect to its
predecessor point. The 'optimal rate of change' is the rate of change
in which each point in the vector performs the same change than its
predecessor, meaning that when calling this function over evenly spaced
`w_vector` values, no change will be reflected on the output.
Parameters
----------
trajectories: pandas.DataFrame
Values to weight
w_vector: pandas.Series
Values used to weight `trajectories`
Returns
-------
pandas.DataFrame
A weighted version of `trajectories`.
Raises
------
ValueError
If `trajectories` and `w_vector` don't have equal lengths
If `w_vector` is not a gradient
TypeError
If `trajectories` and `w_vector` are not iterables
"""
try:
if len(trajectories) != len(w_vector):
raise ValueError("trajectories (%d) & w_vector (%d) must be equal "
"lengths" % (len(trajectories), len(w_vector)))
except TypeError:
raise TypeError("trajectories and w_vector must be iterables")
# check no repeated values are passed in the weighting vector
if len(set(w_vector)) != len(w_vector):
raise ValueError("The weighting vector must be a gradient")
# no need to weight in case of a one element vector
if len(w_vector) == 1:
return trajectories
# Cast to float so divisions have a floating point resolution
total_length = float(max(w_vector) - min(w_vector))
# Reflects the expected gradient between subsequent values in w_vector
# the first value isn't weighted so subtract one from the number of
# elements
optimal_gradient = total_length/(len(w_vector)-1)
# for all elements apply the weighting function
for i, idx in enumerate(trajectories.index):
# Skipping the first element is it doesn't need to be weighted
if i != 0:
trajectories.loc[idx] = (
trajectories.loc[idx] * optimal_gradient /
np.abs((w_vector[i] - w_vector[i-1]))
)
return trajectories
def _ANOVA_trajectories(category, res_by_group):
    r"""Run a one-way ANOVA over the trajectories in `res_by_group`.

    If ANOVA cannot be run for the current category (because either there is
    only one group in the category or some group has only one member) the
    result CategoryResults instance has `probability` and `groups` set to
    None and `message` set to a string explaining why ANOVA was not run.

    Returns
    -------
    CategoryResults
        An instance of CategoryResults holding the results of the trajectory
        analysis applied on `category`
    """
    # ANOVA needs at least two groups to compare
    if len(res_by_group) == 1:
        return CategoryResults(category, None, None,
                               'Only one value in the group.')

    group_values = [res.trajectory.astype(float) for res in res_by_group]

    # ANOVA also requires every group to contribute more than one observation
    if any(len(vals) == 1 for vals in group_values):
        return CategoryResults(category, None, None,
                               'This group can not be used. All groups '
                               'should have more than 1 element.')

    # Groups are valid; run the test and keep only the p-value
    _, p_val = f_oneway(*group_values)
    return CategoryResults(category, p_val, res_by_group, None)
class GroupResults:
    """Store the trajectory results of a group of a metadata category

    Attributes
    ----------
    name : str
        The name of the group within the metadata category
    trajectory : array like
        The result trajectory in an 1-D numpy array
    mean : float
        The mean of the trajectory
    info : dict
        Any extra information computed by the trajectory algorithm. Depends on
        the algorithm
    message : str
        A message with information of the execution of the algorithm
    """

    @experimental(as_of="0.4.0")
    def __init__(self, name, trajectory, mean, info, message):
        self.name = name
        self.trajectory = trajectory
        self.mean = mean
        self.info = info
        self.message = message

    @experimental(as_of="0.4.0")
    def to_files(self, out_f, raw_f):
        r"""Save the trajectory analysis results for a category group to files
        in text format.

        Parameters
        ----------
        out_f : file-like object
            File-like object to write trajectory analysis data to. Must have a
            `write` method. It is the caller's responsibility to close
            `out_f` when done (if necessary)
        raw_f : file-like object
            File-like object to write trajectories trajectory values. Must have
            a `write` method. It is the caller's responsibility to close
            `out_f` when done (if necessary)
        """
        out_f.write('For group "%s", the group means is: %f\n'
                    % (self.name, self.mean))
        raw_f.write('For group "%s":\n' % self.name)
        # message may be None/empty; only written when the algorithm set one
        if self.message:
            out_f.write('%s\n' % self.message)
            raw_f.write('%s\n' % self.message)
        # sort the info entries so the summary output is deterministic
        out_f.write('The info is: %s\n'
                    % sorted(((k, v) for k, v in self.info.items())))
        raw_f.write('The trajectory is:\n[%s]\n'
                    % ", ".join(map(str, self.trajectory)))
class CategoryResults:
    """Store the trajectory results of a metadata category

    Attributes
    ----------
    category : str
        The name of the category
    probability : float
        The ANOVA probability that the category groups are independent
    groups : list of GroupResults
        The trajectory results for each group in the category
    message : str
        A message with information of the execution of the algorithm
    """

    @experimental(as_of="0.4.0")
    def __init__(self, category, probability, groups, message):
        self.category = category
        self.probability = probability
        self.groups = groups
        self.message = message

    @experimental(as_of="0.4.0")
    def to_files(self, out_f, raw_f):
        r"""Save the trajectory analysis results for a category to files in
        text format.

        Parameters
        ----------
        out_f : file-like object
            File-like object to write trajectory analysis data to. Must have a
            `write` method. It is the caller's responsibility to close `out_f`
            when done (if necessary)
        raw_f : file-like object
            File-like object to write trajectory raw values. Must have a
            `write` method. It is the caller's responsibility to close `out_f`
            when done (if necessary)
        """
        # probability is None when ANOVA could not be run; in that case
        # report the explanatory message instead of per-group details
        if self.probability is None:
            out_f.write('Grouped by "%s": %s\n'
                        % (self.category, self.message))
        else:
            out_f.write('Grouped by "%s", probability: %f\n'
                        % (self.category, self.probability))
            raw_f.write('Grouped by "%s"\n' % self.category)
            for group in self.groups:
                group.to_files(out_f, raw_f)
class GradientANOVAResults:
    """Store the trajectory results

    Attributes
    ----------
    algorithm : str
        The algorithm used to compute trajectories
    weighted : bool
        If true, a weighting vector was used
    categories : list of CategoryResults
        The trajectory results for each metadata category
    """

    @experimental(as_of="0.4.0")
    def __init__(self, algorithm, weighted, categories):
        self.algorithm = algorithm
        self.weighted = weighted
        self.categories = categories

    @experimental(as_of="0.4.0")
    def to_files(self, out_f, raw_f):
        r"""Save the trajectory analysis results to files in text format.

        Parameters
        ----------
        out_f : file-like object
            File-like object to write trajectories analysis data to. Must have
            a `write` method. It is the caller's responsibility to close
            `out_f` when done (if necessary)
        raw_f : file-like object
            File-like object to write trajectories raw values. Must have a
            `write` method. It is the caller's responsibility to close `out_f`
            when done (if necessary)
        """
        out_f.write('Trajectory algorithm: %s\n' % self.algorithm)
        raw_f.write('Trajectory algorithm: %s\n' % self.algorithm)
        # banner noting the results were weighted, written to both files
        if self.weighted:
            out_f.write('** This output is weighted **\n')
            raw_f.write('** This output is weighted **\n')
        out_f.write('\n')
        raw_f.write('\n')
        for cat_results in self.categories:
            cat_results.to_files(out_f, raw_f)
            out_f.write('\n')
            raw_f.write('\n')
class GradientANOVA:
r"""Base class for the Trajectory algorithms
Parameters
----------
coords : pandas.DataFrame
The coordinates for each sample id
prop_expl : array like
The numpy 1-D array with the proportion explained by each axis in
coords
metadata_map : pandas.DataFrame
The metadata map, indexed by sample ids and columns are metadata
categories
trajectory_categories : list of str, optional
A list of metadata categories to use to create the trajectories. If
None is passed, the trajectories for all metadata categories are
computed. Default: None, compute all of them
sort_category : str, optional
The metadata category to use to sort the trajectories. Default: None
axes : int, optional
The number of axes to account while doing the trajectory specific
calculations. Pass 0 to compute all of them. Default: 3
weighted : bool, optional
If true, the output is weighted by the space between samples in the
`sort_category` column
Raises
------
ValueError
If any category of `trajectory_categories` is not present in
`metadata_map`
If `sort_category` is not present in `metadata_map`
If `axes` is not between 0 and the maximum number of axes available
If `weighted` is True and no `sort_category` is provided
If `weighted` is True and the values under `sort_category` are not
numerical
If `coords` and `metadata_map` does not have samples in common
"""
# Should be defined by the derived classes
_alg_name = None
@experimental(as_of="0.4.0")
def __init__(self, coords, prop_expl, metadata_map,
trajectory_categories=None, sort_category=None, axes=3,
weighted=False):
if not trajectory_categories:
# If trajectory_categories is not provided, use all the categories
# present in the metadata map
trajectory_categories = metadata_map.keys()
else:
# Check that trajectory_categories are in metadata_map
for category in trajectory_categories:
if category not in metadata_map:
raise ValueError("Category %s not present in metadata."
% category)
# Check that sort_categories is in metadata_map
if sort_category and sort_category not in metadata_map:
raise ValueError("Sort category %s not present in metadata."
% sort_category)
if axes == 0:
# If axes == 0, we should compute the trajectories for all axes
axes = len(prop_expl)
elif axes > len(prop_expl) or axes < 0:
# Axes should be 0 <= axes <= len(prop_expl)
raise ValueError("axes should be between 0 and the max number of "
"axes available (%d), found: %d "
% (len(prop_expl), axes))
# Restrict coordinates to those axes that we actually need to compute
self._coords = coords.loc[:, :axes-1]
self._prop_expl = prop_expl[:axes]
self._metadata_map = metadata_map
self._weighted = weighted
# Remove any samples from coords not present in mapping file
# and remove any samples from metadata_map not present in coords
self._normalize_samples()
# Create groups
self._make_groups(trajectory_categories, sort_category)
# Compute the weighting_vector
self._weighting_vector = None
if weighted:
if not sort_category:
raise ValueError("You should provide a sort category if you "
"want to weight the trajectories")
try:
self._weighting_vector = \
self._metadata_map[sort_category].astype(np.float64)
except ValueError:
raise ValueError("The sorting category must be numeric")
# Initialize the message buffer
self._message_buffer = []
@experimental(as_of="0.4.0")
def get_trajectories(self):
r"""Compute the trajectories for each group in each category and run
ANOVA over the results to test group independence.
Returns
-------
GradientANOVAResults
An instance of GradientANOVAResults holding the results.
"""
result = GradientANOVAResults(self._alg_name, self._weighted, [])
# Loop through all the categories that we should compute
# the trajectories
for cat, cat_groups in self._groups.items():
# Loop through all the category values present in the current
# category and compute the trajectory for each of them
res_by_group = []
for group in sorted(cat_groups, key=lambda k: str(k)):
res_by_group.append(
self._get_group_trajectories(group, cat_groups[group]))
result.categories.append(_ANOVA_trajectories(cat, res_by_group))
return result
def _normalize_samples(self):
r"""Ensures that `self._coords` and `self._metadata_map` have the same
sample ids
Raises
------
ValueError
If `coords` and `metadata_map` does not have samples in common
"""
# Figure out the sample ids in common
coords_sample_ids = set(self._coords.index)
mm_sample_ids = set(self._metadata_map.index)
sample_ids = coords_sample_ids.intersection(mm_sample_ids)
# Check if they actually have sample ids in common
if not sample_ids:
raise ValueError("Coordinates and metadata map had no samples "
"in common")
# Need to take a subset of coords
if coords_sample_ids != sample_ids:
self._coords = self._coords.loc[sample_ids]
# Need to take a subset of metadata_map
if mm_sample_ids != sample_ids:
self._metadata_map = self._metadata_map.loc[sample_ids]
def _make_groups(self, trajectory_categories, sort_category):
r"""Groups the sample ids in `self._metadata_map` by the values in
`trajectory_categories`
Creates `self._groups`, a dictionary keyed by category and values are
dictionaries in which the keys represent the group name within the
category and values are ordered lists of sample ids
If `sort_category` is not None, the sample ids are sorted based on the
values under this category in the metadata map. Otherwise, they are
sorted using the sample id.
Parameters
----------
trajectory_categories : list of str
A list of metadata categories to use to create the groups.
Default: None, compute all of them
sort_category : str or None
The category from self._metadata_map to use to sort groups
"""
# If sort_category is provided, we used the value of such category to
# sort. Otherwise, we use the sample id.
if sort_category:
def sort_val(sid):
return self._metadata_map[sort_category][sid]
else:
def sort_val(sid):
return sid
self._groups = defaultdict(dict)
for cat in trajectory_categories:
# Group samples by category
gb = self._metadata_map.groupby(cat)
for g, df in gb:
self._groups[cat][g] = realsorted(df.index, key=sort_val)
def _get_group_trajectories(self, group_name, sids):
    r"""Compute the trajectory results for `group_name` containing the
    samples `sids`.

    Weights the data if `self._weighted` is True and ``len(sids) > 1``

    Parameters
    ----------
    group_name : str
        The name of the group
    sids : list of str
        The sample ids in the group

    Returns
    -------
    GroupResults
        The trajectory results for the given group

    Raises
    ------
    RuntimeError
        If sids is an empty list
    """
    # We multiply the coord values with the prop_expl, scaling each axis
    # by the proportion of variance it explains.
    trajectories = self._coords.loc[sids] * self._prop_expl
    if trajectories.empty:
        # Raising a RuntimeError since in a usual execution this should
        # never happen. The only way this can happen is if the user
        # directly calls this method, which shouldn't be done
        # (that's why the method is private)
        raise RuntimeError("No samples to process, an empty list cannot "
                           "be processed")
    # The weighting can only be done over trajectories with a length
    # greater than 1
    if self._weighted and len(sids) > 1:
        # Keep an unweighted copy so we can fall back to it if the
        # weighting fails (e.g. flat weighting vector).
        trajectories_copy = deepcopy(trajectories)
        try:
            trajectories = _weight_by_vector(trajectories_copy,
                                             self._weighting_vector[sids])
        except (FloatingPointError, ValueError):
            # Record the failure; the buffer is attached to the
            # GroupResults produced below and then cleared there.
            self._message_buffer.append("Could not weight group, no "
                                        "gradient in the the "
                                        "weighting vector.\n")
            trajectories = trajectories_copy
    # Re-select by sids to guarantee the sorted sample order expected by
    # the concrete _compute_trajectories_results implementations.
    return self._compute_trajectories_results(group_name,
                                              trajectories.loc[sids])
def _compute_trajectories_results(self, group_name, trajectories):
r"""Do the actual trajectories computation over trajectories
Parameters
----------
group_name : str
The name of the group
trajectories : pandas.DataFrame
The sorted trajectories for each sample in the group
Raises
------
NotImplementedError
This is the base class
"""
raise NotImplementedError("No algorithm is implemented on the base "
"class.")
class AverageGradientANOVA(GradientANOVA):
    r"""Perform trajectory analysis using the RMS average algorithm

    For each group in a category, it computes the average point among the
    samples in such group and then computes the norm of each sample from the
    averaged one.

    See Also
    --------
    GradientANOVA
    """

    _alg_name = 'avg'

    def _compute_trajectories_results(self, group_name, trajectories):
        r"""Do the actual trajectory computation over trajectories

        Parameters
        ----------
        group_name : str
            The name of the group
        trajectories : pandas.DataFrame
            The sorted trajectories for each sample in the group

        Returns
        -------
        GroupResults
            The trajectory results for `group_name` using the average
            trajectories method
        """
        center = np.average(trajectories, axis=0)
        if len(trajectories) == 1:
            trajectory = np.array([np.linalg.norm(center)])
            calc = {'avg': trajectory[0]}
        else:
            # Per-row Euclidean distance to the group centroid, vectorized
            # instead of the original per-row iterrows() Python loop.
            trajectory = np.linalg.norm(trajectories.to_numpy() - center,
                                        axis=1)
            calc = {'avg': np.average(trajectory)}
        msg = ''.join(self._message_buffer) if self._message_buffer else None
        # Reset the message buffer
        self._message_buffer = []
        return GroupResults(group_name, trajectory, np.mean(trajectory),
                            calc, msg)
class TrajectoryGradientANOVA(GradientANOVA):
    r"""Perform trajectory analysis using the RMS trajectory algorithm

    For each group in a category, each component of the result trajectory is
    computed as taking the sorted list of samples in the group and taking the
    norm of the coordinates of the 2nd sample minus 1st sample, 3rd sample
    minus 2nd sample and so on.

    See Also
    --------
    GradientANOVA
    """

    _alg_name = 'trajectory'

    def _compute_trajectories_results(self, group_name, trajectories):
        r"""Do the actual trajectory computation over trajectories

        Parameters
        ----------
        group_name : str
            The name of the group
        trajectories : pandas.DataFrame
            The sorted trajectories for each sample in the group

        Returns
        -------
        GroupResults
            The trajectory results for `group_name` using the trajectory
            method
        """
        if len(trajectories) == 1:
            trajectory = np.array([np.linalg.norm(trajectories)])
            calc = {'2-norm': trajectory[0]}
        else:
            # Norm of each consecutive (sorted) sample difference,
            # vectorized with np.diff instead of the per-index iloc loop.
            trajectory = np.linalg.norm(
                np.diff(trajectories.to_numpy(), axis=0), axis=1)
            calc = {'2-norm': np.linalg.norm(trajectory)}
        msg = ''.join(self._message_buffer) if self._message_buffer else None
        # Reset the message buffer
        self._message_buffer = []
        return GroupResults(group_name, trajectory, np.mean(trajectory),
                            calc, msg)
class FirstDifferenceGradientANOVA(GradientANOVA):
    r"""Perform trajectory analysis using the first difference algorithm

    It calculates the norm for all the time-points and then calculates the
    first difference for each resulting point

    See Also
    --------
    GradientANOVA
    """

    _alg_name = 'diff'

    def _compute_trajectories_results(self, group_name, trajectories):
        r"""Do the actual trajectory computation over trajectories

        Parameters
        ----------
        group_name : str
            The name of the group
        trajectories : pandas.DataFrame
            The sorted trajectories for each sample in the group

        Returns
        -------
        GroupResults
            The trajectory results for `group_name` using the first
            difference method
        """
        if len(trajectories) == 1:
            trajectory = np.array([np.linalg.norm(trajectories)])
            calc = {'mean': trajectory[0], 'std': 0}
        elif len(trajectories) == 2:
            # Bug fix: ``trajectories[1] - trajectories[0]`` selected the
            # DataFrame *columns* labelled 0 and 1, not the two sample
            # rows. Use positional row access, as the general branch does.
            trajectory = np.array(
                [np.linalg.norm(trajectories.iloc[1].to_numpy() -
                                trajectories.iloc[0].to_numpy())])
            calc = {'mean': trajectory[0], 'std': 0}
        else:
            # Norms of consecutive (sorted) sample differences, vectorized.
            vec_norm = np.linalg.norm(
                np.diff(trajectories.to_numpy(), axis=0), axis=1)
            trajectory = np.diff(vec_norm)
            calc = {'mean': np.mean(trajectory), 'std': np.std(trajectory)}
        msg = ''.join(self._message_buffer) if self._message_buffer else None
        # Reset the message buffer
        self._message_buffer = []
        return GroupResults(group_name, trajectory, np.mean(trajectory),
                            calc, msg)
class WindowDifferenceGradientANOVA(GradientANOVA):
    r"""Perform trajectory analysis using the modified first difference
    algorithm

    It calculates the norm for all the time-points and subtracts the mean of
    the next number of elements specified in `window_size` and the current
    element.

    Parameters
    ----------
    coords : pandas.DataFrame
        The coordinates for each sample id
    prop_expl : array like
        The numpy 1-D array with the proportion explained by each axis in
        coords
    metadata_map : pandas.DataFrame
        The metadata map, indexed by sample ids and columns are metadata
        categories
    window_size : int or long
        The window size to use while computing the differences

    Raises
    ------
    ValueError
        If the window_size is not a positive integer

    See Also
    --------
    GradientANOVA
    """

    _alg_name = 'wdiff'

    @experimental(as_of="0.4.0")
    def __init__(self, coords, prop_expl, metadata_map, window_size, **kwargs):
        super(WindowDifferenceGradientANOVA, self).__init__(coords, prop_expl,
                                                            metadata_map,
                                                            **kwargs)
        if not isinstance(window_size, Integral) or window_size < 1:
            raise ValueError("The window_size must be a positive integer")
        self._window_size = window_size

    def _compute_trajectories_results(self, group_name, trajectories):
        r"""Do the actual trajectory computation over trajectories

        If the first difference cannot be calculated of the provided window
        size, no difference is applied and a message is added to the results.

        Parameters
        ----------
        group_name : str
            The name of the group
        trajectories : pandas.DataFrame
            The sorted trajectories for each sample in the group

        Returns
        -------
        GroupResults
            The trajectory results for `group_name` using the windowed
            difference method
        """
        if len(trajectories) == 1:
            trajectory = np.array([np.linalg.norm(trajectories)])
            # Bug fix: store the scalar under 'mean' (as the sibling
            # algorithms do), not the whole array.
            calc = {'mean': trajectory[0], 'std': 0}
        elif len(trajectories) == 2:
            # Bug fix: ``trajectories[1] - trajectories[0]`` selected the
            # DataFrame *columns* labelled 0 and 1, not the two sample rows.
            trajectory = np.array(
                [np.linalg.norm(trajectories.iloc[1].to_numpy() -
                                trajectories.iloc[0].to_numpy())])
            calc = {'mean': trajectory[0], 'std': 0}
        else:
            # Norms of consecutive (sorted) sample differences, vectorized.
            vec_norm = np.linalg.norm(
                np.diff(trajectories.to_numpy(), axis=0), axis=1)
            # windowed first differences won't be able on every group,
            # specially given the variation of size that a trajectory tends
            # to have
            if len(vec_norm) <= self._window_size:
                trajectory = vec_norm
                self._message_buffer.append("Cannot calculate the first "
                                            "difference with a window of size "
                                            "(%d)." % self._window_size)
            else:
                # Replicate the last element as many times as required
                for idx in range(0, self._window_size):
                    vec_norm = np.append(vec_norm, vec_norm[-1:], axis=0)
                trajectory = []
                for idx in range(0, len(vec_norm) - self._window_size):
                    # Mean has to be over axis 0 so it handles arrays of
                    # arrays
                    element = np.mean(vec_norm[(idx + 1):
                                               (idx + 1 + self._window_size)],
                                      axis=0)
                    trajectory.append(element - vec_norm[idx])
                trajectory = np.array(trajectory)
            calc = {'mean': np.mean(trajectory), 'std': np.std(trajectory)}
        msg = ''.join(self._message_buffer) if self._message_buffer else None
        # Reset the message buffer
        self._message_buffer = []
        return GroupResults(group_name, trajectory, np.mean(trajectory),
                            calc, msg)
| gregcaporaso/scikit-bio | skbio/stats/gradient.py | Python | bsd-3-clause | 32,198 | [
"scikit-bio"
] | be34933b6c0164d1a67cc09629d2c5f9c12a80e37015d6a414f382f4cb8bb393 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a FloatWithUnit, which is a subclass of float. It
also defines supported units for some commonly used units for energy, length,
temperature, time and charge. FloatWithUnit also support conversion to one
another, and additions and subtractions perform automatic conversion if
units are detected. An ArrayWithUnit is also implemented, which is a subclass
of numpy's ndarray with similar unit features.
"""
import collections
import numbers
from functools import partial
import numpy as np
import scipy.constants as const
__author__ = "Shyue Ping Ong, Matteo Giantomassi"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong, Matteo Giantomassi"
__status__ = "Production"
__date__ = "Aug 30, 2013"
"""
Some conversion factors
"""
Ha_to_eV = 1 / const.physical_constants["electron volt-hartree relationship"][0]
eV_to_Ha = 1 / Ha_to_eV
Ry_to_eV = Ha_to_eV / 2
amu_to_kg = const.physical_constants["atomic mass unit-kilogram relationship"][0]
mile_to_meters = const.mile
bohr_to_angstrom = const.physical_constants["Bohr radius"][0] * 1e10
bohr_to_ang = bohr_to_angstrom
ang_to_bohr = 1 / bohr_to_ang
kCal_to_kJ = const.calorie
"""
Definitions of supported units. Values below are essentially scaling and
conversion factors. What matters is the relative values, not the absolute.
The SI units must have factor 1.
"""
BASE_UNITS = {
"length": {
"m": 1,
"km": 1000,
"mile": mile_to_meters,
"ang": 1e-10,
"cm": 1e-2,
"pm": 1e-12,
"bohr": bohr_to_angstrom * 1e-10,
},
"mass": {
"kg": 1,
"g": 1e-3,
"amu": amu_to_kg,
},
"time": {
"s": 1,
"min": 60,
"h": 3600,
"d": 3600 * 24,
},
"current": {
"A": 1
},
"temperature": {
"K": 1,
},
"amount": {
"mol": 1,
"atom": 1 / const.N_A
},
"intensity": {
"cd": 1
},
"memory": {
"byte": 1,
"Kb": 1024,
"Mb": 1024 ** 2,
"Gb": 1024 ** 3,
"Tb": 1024 ** 4,
},
}
# Accept kb, mb, gb ... as well.
BASE_UNITS["memory"].update({k.lower(): v
for k, v in BASE_UNITS["memory"].items()})
# This current list are supported derived units defined in terms of powers of
# SI base units and constants.
DERIVED_UNITS = {
"energy": {
"eV": {"kg": 1, "m": 2, "s": -2, const.e: 1},
"meV": {"kg": 1, "m": 2, "s": -2, const.e * 1e-3: 1},
"Ha": {"kg": 1, "m": 2, "s": -2, const.e * Ha_to_eV: 1},
"Ry": {"kg": 1, "m": 2, "s": -2, const.e * Ry_to_eV: 1},
"J": {"kg": 1, "m": 2, "s": -2},
"kJ": {"kg": 1, "m": 2, "s": -2, 1000: 1},
"kCal": {"kg": 1, "m": 2, "s": -2, 1000: 1, kCal_to_kJ: 1}
},
"charge": {
"C": {"A": 1, "s": 1},
"e": {"A": 1, "s": 1, const.e: 1},
},
"force": {
"N": {"kg": 1, "m": 1, "s": -2},
"KN": {"kg": 1, "m": 1, "s": -2, 1000: 1},
"MN": {"kg": 1, "m": 1, "s": -2, 1e6: 1},
"GN": {"kg": 1, "m": 1, "s": -2, 1e9: 1},
},
"frequency": {
"Hz": {"s": -1},
"KHz": {"s": -1, 1000: 1},
"MHz": {"s": -1, 1e6: 1},
"GHz": {"s": -1, 1e9: 1},
"THz": {"s": -1, 1e12: 1},
},
"pressure": {
"Pa": {"kg": 1, "m": -1, "s": -2},
"KPa": {"kg": 1, "m": -1, "s": -2, 1000: 1},
"MPa": {"kg": 1, "m": -1, "s": -2, 1e6: 1},
"GPa": {"kg": 1, "m": -1, "s": -2, 1e9: 1}
},
"power": {
"W": {"m": 2, "kg": 1, "s": -3},
"KW": {"m": 2, "kg": 1, "s": -3, 1000: 1},
"MW": {"m": 2, "kg": 1, "s": -3, 1e6: 1},
"GW": {"m": 2, "kg": 1, "s": -3, 1e9: 1}
},
"emf": {
"V": {"m": 2, "kg": 1, "s": -3, "A": -1}
},
"capacitance": {
"F": {"m": -2, "kg": -1, "s": 4, "A": 2}
},
"resistance": {
"ohm": {"m": 2, "kg": 1, "s": -3, "A": -2}
},
"conductance": {
"S": {"m": -2, "kg": -1, "s": 3, "A": 2}
},
"magnetic_flux": {
"Wb": {"m": 2, "kg": 1, "s": -2, "A": -1}
},
"cross_section": {
"barn": {"m": 2, 1E-28: 1},
"mbarn": {"m": 2, 1E-31: 1}
}
}
ALL_UNITS = dict(list(BASE_UNITS.items()) + list(DERIVED_UNITS.items())) # type: ignore
SUPPORTED_UNIT_NAMES = tuple([i for d in ALL_UNITS.values() for i in d.keys()])
# Mapping unit name --> unit type (unit names must be unique).
_UNAME2UTYPE = {} # type: ignore
for utype, d in ALL_UNITS.items():
assert not set(d.keys()).intersection(_UNAME2UTYPE.keys())
_UNAME2UTYPE.update({uname: utype for uname in d})
del utype, d
def _get_si_unit(unit):
    """Return ``(si_symbol, factor)`` for a base unit symbol.

    ``si_symbol`` is the symbol with scale 1 in the unit's type table and
    ``factor`` is the scale of `unit` relative to it.
    """
    scale_table = BASE_UNITS[_UNAME2UTYPE[unit]]
    si_symbol = next(k for k, v in scale_table.items() if v == 1)
    return si_symbol, scale_table[unit]
# Bug fix: user-level errors should derive from Exception, not
# BaseException, which is reserved for system-exiting exceptions such as
# SystemExit and KeyboardInterrupt. Deriving from BaseException made
# ``except Exception`` handlers silently miss UnitError.
class UnitError(Exception):
    """
    Exception class for unit errors.
    """
def _check_mappings(u):
    """Collapse a power mapping onto a known derived unit when it matches.

    If `u` is exactly the definition of some derived unit (e.g. the power
    mapping of J), return ``{symbol: 1}`` for that unit; otherwise return
    `u` unchanged.
    """
    for defs in DERIVED_UNITS.values():
        for symbol, powers in defs.items():
            forward = all(powers.get(k, 0) == v for k, v in u.items())
            backward = all(u.get(k, 0) == v for k, v in powers.items())
            if forward and backward:
                return {symbol: 1}
    return u
class Unit(collections.abc.Mapping):
    """
    Represents a unit, e.g., "m" for meters, etc. Supports compound units.
    Only integer powers are supported for units.
    """

    # Exception type raised for unit errors, exposed for convenience.
    Error = UnitError

    def __init__(self, unit_def):
        """
        Constructs a unit.

        Args:
            unit_def: A definition for the unit. Either a mapping of unit to
                powers, e.g., {"m": 2, "s": -1} represents "m^2 s^-1",
                or simply as a string "kg m^2 s^-1". Note that the supported
                format uses "^" as the power operator and all units must be
                space-separated.
        """
        if isinstance(unit_def, str):
            unit = collections.defaultdict(int)
            import re
            # Each token is a symbol with an optional "^power"; a missing
            # power means 1 and repeated symbols accumulate their powers.
            for m in re.finditer(r"([A-Za-z]+)\s*\^*\s*([\-0-9]*)", unit_def):
                p = m.group(2)
                p = 1 if not p else int(p)
                k = m.group(1)
                unit[k] += p
        else:
            # Drop zero powers so equivalent definitions compare equal.
            unit = {k: v for k, v in dict(unit_def).items() if v != 0}
        # Collapse onto a known derived unit (e.g. kg m^2 s^-2 -> J).
        self._unit = _check_mappings(unit)

    def __mul__(self, other):
        """Product of two units: adds the powers of each symbol."""
        new_units = collections.defaultdict(int)
        for k, v in self.items():
            new_units[k] += v
        for k, v in other.items():
            new_units[k] += v
        return Unit(new_units)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __div__(self, other):
        """Quotient of two units: subtracts the divisor's powers."""
        new_units = collections.defaultdict(int)
        for k, v in self.items():
            new_units[k] += v
        for k, v in other.items():
            new_units[k] -= v
        return Unit(new_units)

    def __truediv__(self, other):
        # Python 3 division delegates to the legacy __div__ above.
        return self.__div__(other)

    def __pow__(self, i):
        """Raise the unit to an integer power `i`."""
        return Unit({k: v * i for k, v in self.items()})

    def __iter__(self):
        return self._unit.__iter__()

    def __getitem__(self, i):
        return self._unit[i]

    def __len__(self):
        return len(self._unit)

    def __repr__(self):
        # Render e.g. "kg m^2 s^-2": positive powers first, "^1" omitted,
        # zero powers skipped.
        sorted_keys = sorted(self._unit.keys(),
                             key=lambda k: (-self._unit[k], k))
        return " ".join(["{}^{}".format(k, self._unit[k])
                         if self._unit[k] != 1 else k
                         for k in sorted_keys if self._unit[k] != 0])

    def __str__(self):
        return self.__repr__()

    @property
    def as_base_units(self):
        """
        Converts all units to base SI units, including derived units.

        Returns:
            (base_units_dict, scaling factor). base_units_dict will not
            contain any constants, which are gathered in the scaling factor.
        """
        b = collections.defaultdict(int)
        factor = 1
        for k, v in self.items():
            derived = False
            # Expand derived symbols into their base-unit power mappings;
            # numeric keys are constants folded into the scaling factor.
            for d in DERIVED_UNITS.values():
                if k in d:
                    for k2, v2 in d[k].items():
                        if isinstance(k2, numbers.Number):
                            factor *= k2 ** (v2 * v)
                        else:
                            b[k2] += v2 * v
                    derived = True
                    break
            if not derived:
                # Plain base unit: map to its SI symbol and fold the scale.
                si, f = _get_si_unit(k)
                b[si] += v
                factor *= f ** v
        return {k: v for k, v in b.items() if v != 0}, factor

    def get_conversion_factor(self, new_unit):
        """
        Returns a conversion factor between this unit and a new unit.
        Compound units are supported, but must have the same powers in each
        unit type.

        Args:
            new_unit: The new unit.

        Raises:
            UnitError: If the two units have incompatible powers.
        """
        uo_base, ofactor = self.as_base_units
        un_base, nfactor = Unit(new_unit).as_base_units
        # Align the two base-unit mappings by unit type before comparing.
        units_new = sorted(un_base.items(),
                           key=lambda d: _UNAME2UTYPE[d[0]])
        units_old = sorted(uo_base.items(),
                           key=lambda d: _UNAME2UTYPE[d[0]])
        factor = ofactor / nfactor
        for uo, un in zip(units_old, units_new):
            if uo[1] != un[1]:
                raise UnitError("Units %s and %s are not compatible!" % (uo, un))
            c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]
            factor *= (c[uo[0]] / c[un[0]]) ** uo[1]
        return factor
class FloatWithUnit(float):
    """
    Subclasses float to attach a unit type. Typically, you should use the
    pre-defined unit type subclasses such as Energy, Length, etc. instead of
    using FloatWithUnit directly.

    Supports conversion, addition and subtraction of the same unit type. E.g.,
    1 m + 20 cm will be automatically converted to 1.2 m (units follow the
    leftmost quantity). Note that FloatWithUnit does not override the eq
    method for float, i.e., units are not checked when testing for equality.
    The reason is to allow this class to be used transparently wherever floats
    are expected.

    >>> e = Energy(1.1, "Ha")
    >>> a = Energy(1.1, "Ha")
    >>> b = Energy(3, "eV")
    >>> c = a + b
    >>> print(c)
    1.2102479761938871 Ha
    >>> c.to("eV")
    32.932522246000005 eV
    """

    # Exception type raised for unit errors, exposed for convenience.
    Error = UnitError

    @classmethod
    def from_string(cls, s):
        """
        Initialize a FloatWithUnit from a string, e.g.,
        ``Memory.from_string("1. Mb")``.

        Args:
            s: A string of the form "<float> <unit>".

        Raises:
            Exception: If no unit can be found in the string.
        """
        # Extract num and unit string: the number ends at the first
        # alphabetic or whitespace character.
        s = s.strip()
        for i, char in enumerate(s):
            if char.isalpha() or char.isspace():
                break
        else:
            raise Exception("Unit is missing in string %s" % s)
        # Bug fix: strip the unit part. Previously a leading space (e.g.
        # "1. Mb") made the lookup below fail, so unit_type was always None
        # for space-separated inputs.
        num, unit = float(s[:i]), s[i:].strip()
        # Find unit type (set it to None if it cannot be detected)
        for unit_type, d in BASE_UNITS.items():
            if unit in d:
                break
        else:
            unit_type = None
        return cls(num, unit, unit_type=unit_type)

    def __new__(cls, val, unit, unit_type=None):
        """Overrides __new__ since we are subclassing a Python primitive."""
        new = float.__new__(cls, val)
        new._unit = Unit(unit)
        new._unit_type = unit_type
        return new

    def __init__(self, val, unit, unit_type=None):
        """
        Initializes a float with unit.

        Args:
            val (float): Value
            unit (Unit): A unit. E.g., "C".
            unit_type (str): A type of unit. E.g., "charge"

        Raises:
            UnitError: If `unit` is not a supported unit for `unit_type`.
        """
        if unit_type is not None and str(unit) not in ALL_UNITS[unit_type]:
            raise UnitError(
                "{} is not a supported unit for {}".format(unit, unit_type))
        self._unit = Unit(unit)
        self._unit_type = unit_type

    def __repr__(self):
        return super().__repr__()

    def __str__(self):
        # Render as "<value> <unit>", e.g. "1.1 Ha".
        s = super().__str__()
        return "{} {}".format(s, self._unit)

    def __add__(self, other):
        # Plain numbers fall back to ordinary float addition.
        if not hasattr(other, "unit_type"):
            return super().__add__(other)
        if other.unit_type != self._unit_type:
            raise UnitError("Adding different types of units is not allowed")
        # Units follow the leftmost quantity: convert `other` if needed.
        val = other
        if other.unit != self._unit:
            val = other.to(self._unit)
        return FloatWithUnit(float(self) + val, unit_type=self._unit_type,
                             unit=self._unit)

    def __sub__(self, other):
        if not hasattr(other, "unit_type"):
            return super().__sub__(other)
        if other.unit_type != self._unit_type:
            raise UnitError("Subtracting different units is not allowed")
        val = other
        if other.unit != self._unit:
            val = other.to(self._unit)
        return FloatWithUnit(float(self) - val, unit_type=self._unit_type,
                             unit=self._unit)

    def __mul__(self, other):
        # Multiplying by a plain number keeps the unit; multiplying two
        # united quantities produces a compound unit with no unit_type.
        if not isinstance(other, FloatWithUnit):
            return FloatWithUnit(float(self) * other,
                                 unit_type=self._unit_type,
                                 unit=self._unit)
        return FloatWithUnit(float(self) * other, unit_type=None,
                             unit=self._unit * other._unit)

    def __rmul__(self, other):
        if not isinstance(other, FloatWithUnit):
            return FloatWithUnit(float(self) * other,
                                 unit_type=self._unit_type,
                                 unit=self._unit)
        return FloatWithUnit(float(self) * other, unit_type=None,
                             unit=self._unit * other._unit)

    def __pow__(self, i):
        return FloatWithUnit(float(self) ** i, unit_type=None,
                             unit=self._unit ** i)

    def __truediv__(self, other):
        val = super().__truediv__(other)
        if not isinstance(other, FloatWithUnit):
            return FloatWithUnit(val, unit_type=self._unit_type,
                                 unit=self._unit)
        return FloatWithUnit(val, unit_type=None,
                             unit=self._unit / other._unit)

    def __neg__(self):
        return FloatWithUnit(super().__neg__(),
                             unit_type=self._unit_type,
                             unit=self._unit)

    def __getnewargs__(self):
        """Function used by pickle to recreate object."""
        # Guard against instances unpickled from old payloads in which
        # _unit_type was not stored (see __setstate__).
        if hasattr(self, "_unit_type"):
            args = float(self), self._unit, self._unit_type
        else:
            args = float(self), self._unit, None
        return args

    def __getstate__(self):
        state = self.__dict__.copy()
        state["val"] = float(self)
        return state

    def __setstate__(self, state):
        self._unit = state["_unit"]
        # Bug fix: the original implementation only restored _unit, so
        # unpickled objects lost their unit_type. Restore it when present
        # (old pickles may not carry it, hence .get).
        self._unit_type = state.get("_unit_type")

    @property
    def unit_type(self) -> str:
        """
        :return: The type of unit. Energy, Charge, etc.
        """
        return self._unit_type

    @property
    def unit(self) -> str:
        """
        :return: The unit, e.g., "eV".
        """
        return self._unit

    def to(self, new_unit):
        """
        Conversion to a new_unit. Right now, only supports 1 to 1 mapping of
        units of each type.

        Args:
            new_unit: New unit type.

        Returns:
            A FloatWithUnit object in the new units.

        Example usage:
            >>> e = Energy(1.1, "eV")
            >>> e = Energy(1.1, "Ha")
            >>> e.to("eV")
            29.932522246 eV
        """
        return FloatWithUnit(
            self * self.unit.get_conversion_factor(new_unit),
            unit_type=self._unit_type,
            unit=new_unit)

    @property
    def as_base_units(self):
        """
        Returns this FloatWithUnit in base SI units, including derived units.

        Returns:
            A FloatWithUnit object in base SI units
        """
        return self.to(self.unit.as_base_units[0])

    @property
    def supported_units(self):
        """
        Supported units for specific unit type.
        """
        return tuple(ALL_UNITS[self._unit_type].keys())
class ArrayWithUnit(np.ndarray):
    """
    Subclasses `numpy.ndarray` to attach a unit type. Typically, you should
    use the pre-defined unit type subclasses such as EnergyArray,
    LengthArray, etc. instead of using ArrayWithFloatWithUnit directly.

    Supports conversion, addition and subtraction of the same unit type. E.g.,
    1 m + 20 cm will be automatically converted to 1.2 m (units follow the
    leftmost quantity).

    >>> a = EnergyArray([1, 2], "Ha")
    >>> b = EnergyArray([1, 2], "eV")
    >>> c = a + b
    >>> print(c)
    [ 1.03674933 2.07349865] Ha
    >>> c.to("eV")
    array([ 28.21138386, 56.42276772]) eV
    """

    # Exception type raised for unit errors, exposed for convenience.
    Error = UnitError

    def __new__(cls, input_array, unit, unit_type=None):
        """
        Override __new__.

        Args:
            input_array: Array-like data.
            unit: The unit, e.g., "eV" or a Unit/mapping definition.
            unit_type: The type of unit, e.g., "energy".
        """
        # Input array is an already formed ndarray instance
        # We first cast to be our class type
        obj = np.asarray(input_array).view(cls)
        # add the new attributes to the created instance
        obj._unit = Unit(unit)
        obj._unit_type = unit_type
        return obj

    def __array_finalize__(self, obj):
        """
        See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html for
        comments.
        """
        if obj is None:
            return
        # Views/slices inherit the unit attributes from their source array.
        self._unit = getattr(obj, "_unit", None)
        self._unit_type = getattr(obj, "_unit_type", None)

    @property
    def unit_type(self) -> str:
        """
        :return: The type of unit. Energy, Charge, etc.
        """
        return self._unit_type

    @property
    def unit(self) -> str:
        """
        :return: The unit, e.g., "eV".
        """
        return self._unit

    def __reduce__(self):
        # Augment the ndarray pickle state with the unit attribute.
        # NOTE(review): _unit_type is not stored here and is therefore lost
        # on a pickle round-trip — confirm before relying on it.
        reduce = list(super().__reduce__())
        reduce[2] = {"np_state": reduce[2], "_unit": self._unit}
        return tuple(reduce)

    def __setstate__(self, state):
        # Restore the ndarray state first, then reattach the unit.
        super().__setstate__(state["np_state"])
        self._unit = state["_unit"]

    def __repr__(self):
        return "{} {}".format(np.array(self).__repr__(), self.unit)

    def __str__(self):
        return "{} {}".format(np.array(self).__str__(), self.unit)

    def __add__(self, other):
        # Units follow the leftmost quantity: convert `other` if it carries
        # a (compatible) unit of the same type.
        if hasattr(other, "unit_type"):
            if other.unit_type != self.unit_type:
                raise UnitError("Adding different types of units is"
                                " not allowed")
            if other.unit != self.unit:
                other = other.to(self.unit)
        return self.__class__(np.array(self) + np.array(other),
                              unit_type=self.unit_type, unit=self.unit)

    def __sub__(self, other):
        if hasattr(other, "unit_type"):
            if other.unit_type != self.unit_type:
                raise UnitError("Subtracting different units is not allowed")
            if other.unit != self.unit:
                other = other.to(self.unit)
        return self.__class__(np.array(self) - np.array(other),
                              unit_type=self.unit_type, unit=self.unit)

    def __mul__(self, other):
        # FIXME
        # Here we have the most important difference between FloatWithUnit
        # and ArrayWithFloatWithUnit:
        # If other does not have units, I return an object with the same
        # units as self.
        # if other *has* units, I return an object *without* units since
        # taking into account all the possible derived quantities would be
        # too difficult.
        # Moreover Energy(1.0) * Time(1.0, "s") returns 1.0 Ha that is a
        # bit misleading.
        # Same protocol for __div__
        if not hasattr(other, "unit_type"):
            return self.__class__(np.array(self).__mul__(np.array(other)),
                                  unit_type=self._unit_type, unit=self._unit)
        # Cannot use super since it returns an instance of self.__class__
        # while here we want a bare numpy array.
        return self.__class__(
            np.array(self).__mul__(np.array(other)),
            unit=self.unit * other.unit)

    def __rmul__(self, other):
        if not hasattr(other, "unit_type"):
            return self.__class__(np.array(self).__rmul__(np.array(other)),
                                  unit_type=self._unit_type, unit=self._unit)
        return self.__class__(
            np.array(self).__rmul__(np.array(other)),
            unit=self.unit * other.unit)

    def __div__(self, other):
        # Legacy Python 2 division; same unit protocol as __mul__.
        if not hasattr(other, "unit_type"):
            return self.__class__(np.array(self).__div__(np.array(other)),
                                  unit_type=self._unit_type, unit=self._unit)
        return self.__class__(
            np.array(self).__div__(np.array(other)),
            unit=self.unit / other.unit)

    def __truediv__(self, other):
        if not hasattr(other, "unit_type"):
            return self.__class__(np.array(self).__truediv__(np.array(other)),
                                  unit_type=self._unit_type, unit=self._unit)
        return self.__class__(
            np.array(self).__truediv__(np.array(other)),
            unit=self.unit / other.unit)

    def __neg__(self):
        return self.__class__(np.array(self).__neg__(),
                              unit_type=self.unit_type, unit=self.unit)

    def to(self, new_unit):
        """
        Conversion to a new_unit.

        Args:
            new_unit:
                New unit type.

        Returns:
            A ArrayWithFloatWithUnit object in the new units.

        Example usage:
            >>> e = EnergyArray([1, 1.1], "Ha")
            >>> e.to("eV")
            array([ 27.21138386, 29.93252225]) eV
        """
        return self.__class__(
            np.array(self) * self.unit.get_conversion_factor(new_unit),
            unit_type=self.unit_type, unit=new_unit)

    @property
    def as_base_units(self):
        """
        Returns this ArrayWithUnit in base SI units, including derived units.

        Returns:
            An ArrayWithUnit object in base SI units
        """
        return self.to(self.unit.as_base_units[0])

    # TODO abstract base class property?
    @property
    def supported_units(self):
        """
        Supported units for specific unit type.
        """
        return ALL_UNITS[self.unit_type]

    # TODO abstract base class method?
    def conversions(self):
        """
        Returns a string showing the available conversions.
        Useful tool in interactive mode.
        """
        return "\n".join(str(self.to(unit)) for unit in self.supported_units)
def _my_partial(func, *args, **kwargs):
    """
    Partial returns a partial object and therefore we cannot inherit class
    methods defined in FloatWithUnit. This function calls partial and patches
    the new class before returning.
    """
    wrapped = partial(func, *args, **kwargs)
    # Graft the alternate constructor onto the partial object so callers
    # can still use Something.from_string(...).
    wrapped.from_string = FloatWithUnit.from_string
    return wrapped
Energy = partial(FloatWithUnit, unit_type="energy")
"""
A float with an energy unit.
Args:
val (float): Value
unit (Unit): E.g., eV, kJ, etc. Must be valid unit or UnitError is raised.
"""
EnergyArray = partial(ArrayWithUnit, unit_type="energy")
Length = partial(FloatWithUnit, unit_type="length")
"""
A float with a length unit.
Args:
val (float): Value
unit (Unit): E.g., m, ang, bohr, etc. Must be valid unit or UnitError is
raised.
"""
LengthArray = partial(ArrayWithUnit, unit_type="length")
Mass = partial(FloatWithUnit, unit_type="mass")
"""
A float with a mass unit.
Args:
val (float): Value
unit (Unit): E.g., amu, kg, etc. Must be valid unit or UnitError is
raised.
"""
MassArray = partial(ArrayWithUnit, unit_type="mass")
Temp = partial(FloatWithUnit, unit_type="temperature")
"""
A float with a temperature unit.
Args:
val (float): Value
unit (Unit): E.g., K. Only K (kelvin) is supported.
"""
TempArray = partial(ArrayWithUnit, unit_type="temperature")
Time = partial(FloatWithUnit, unit_type="time")
"""
A float with a time unit.
Args:
val (float): Value
unit (Unit): E.g., s, min, h. Must be valid unit or UnitError is
raised.
"""
TimeArray = partial(ArrayWithUnit, unit_type="time")
Charge = partial(FloatWithUnit, unit_type="charge")
"""
A float with a charge unit.
Args:
val (float): Value
unit (Unit): E.g., C, e (electron charge). Must be valid unit or UnitError
is raised.
"""
ChargeArray = partial(ArrayWithUnit, unit_type="charge")
Memory = _my_partial(FloatWithUnit, unit_type="memory")
"""
A float with a memory unit.
Args:
val (float): Value
unit (Unit): E.g., Kb, Mb, Gb, Tb. Must be valid unit or UnitError
is raised.
"""
def obj_with_unit(obj, unit):
    """
    Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of
    objects with units if obj is a dict, else an instance of
    `ArrayWithFloatWithUnit`.

    Args:
        obj: A scalar, a mapping of name -> value, or an array-like object.
        unit: Specific units (eV, Ha, m, ang, etc.).
    """
    unit_type = _UNAME2UTYPE[unit]
    if isinstance(obj, numbers.Number):
        return FloatWithUnit(obj, unit=unit, unit_type=unit_type)
    # Bug fix: the abstract base classes were removed from the top-level
    # ``collections`` namespace in Python 3.10; they live in
    # ``collections.abc``. The old spelling raised AttributeError.
    if isinstance(obj, collections.abc.Mapping):
        return {k: obj_with_unit(v, unit) for k, v in obj.items()}
    return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)
def unitized(unit):
    """
    Useful decorator to assign units to the output of a function. You can also
    use it to standardize the output units of a function that already returns
    a FloatWithUnit or ArrayWithUnit. For sequences, all values in the sequences
    are assigned the same unit. It works with Python sequences only. The creation
    of numpy arrays loses all unit information. For mapping types, the values
    are assigned units.

    Args:
        unit: Specific unit (eV, Ha, m, ang, etc.).

    Example usage::

        @unitized(unit="kg")
        def get_mass():
            return 123.45
    """
    def wrap(f):
        def wrapped_f(*args, **kwargs):
            result = f(*args, **kwargs)
            utype = _UNAME2UTYPE[unit]
            # Already united: just standardize to the requested unit.
            if isinstance(result, (FloatWithUnit, ArrayWithUnit)):
                return result.to(unit)
            # Sequences keep their concrete type (list or tuple) with each
            # element wrapped individually.
            if isinstance(result, collections.abc.Sequence):
                return result.__class__(
                    [FloatWithUnit(item, unit_type=utype, unit=unit)
                     for item in result])
            # Mappings are updated in place, value by value.
            if isinstance(result, collections.abc.Mapping):
                for key, item in result.items():
                    result[key] = FloatWithUnit(item, unit_type=utype,
                                                unit=unit)
                return result
            if isinstance(result, numbers.Number):
                return FloatWithUnit(result, unit_type=utype, unit=unit)
            if result is None:
                return None
            raise TypeError("Don't know how to assign units to %s" % str(result))
        return wrapped_f
    return wrap
if __name__ == "__main__":
    # Run the embedded doctests when the module is executed directly.
    import doctest
    doctest.testmod()
| mbkumar/pymatgen | pymatgen/core/units.py | Python | mit | 27,902 | [
"pymatgen"
] | e42aa474207f9c308c185821895cb7e6281fcd9d9b2b51918278352b630fbb8e |
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import collections
import logging
import threading
import time
# Return-condition sentinels accepted by wait(); _AS_COMPLETED is the
# internal sentinel used by as_completed().
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'

# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'

# All states a future can be in, in rough lifecycle order.
_FUTURE_STATES = [
    PENDING,
    RUNNING,
    CANCELLED,
    CANCELLED_AND_NOTIFIED,
    FINISHED
]

# Human-readable forms of the states above; both cancelled states are
# rendered identically.
_STATE_TO_DESCRIPTION_MAP = {
    PENDING: "pending",
    RUNNING: "running",
    CANCELLED: "cancelled",
    CANCELLED_AND_NOTIFIED: "cancelled",
    FINISHED: "finished"
}

# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
    """Base class for all future-related exceptions."""
class CancelledError(Error):
    """The Future was cancelled."""
class TimeoutError(Error):
    """The operation exceeded the given deadline."""
class _Waiter(object):
    """Provides the event that wait() and as_completed() block on.

    Workers report completed futures through the add_* callbacks; all
    three kinds are recorded identically here, and subclasses layer
    locking/signalling policy on top.
    """

    def __init__(self):
        self.event = threading.Event()
        self.finished_futures = []

    def _record(self, future):
        # Shared bookkeeping for every completion kind.
        self.finished_futures.append(future)

    def add_result(self, future):
        self._record(future)

    def add_exception(self, future):
        self._record(future)

    def add_cancelled(self, future):
        self._record(future)
class _AsCompletedWaiter(_Waiter):
    """Used by as_completed()."""

    def __init__(self):
        super(_AsCompletedWaiter, self).__init__()
        # Protects finished_futures, which as_completed() swaps out
        # wholesale while holding this lock.
        self.lock = threading.Lock()

    def _append_and_wake(self, future):
        # Record under the lock, then wake the iterating thread.
        with self.lock:
            self.finished_futures.append(future)
            self.event.set()

    def add_result(self, future):
        self._append_and_wake(future)

    def add_exception(self, future):
        self._append_and_wake(future)

    def add_cancelled(self, future):
        self._append_and_wake(future)
class _FirstCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_COMPLETED)."""

    def _record_and_wake(self, future):
        # Any single completion is enough to release the waiter.
        self.finished_futures.append(future)
        self.event.set()

    def add_result(self, future):
        self._record_and_wake(future)

    def add_exception(self, future):
        self._record_and_wake(future)

    def add_cancelled(self, future):
        self._record_and_wake(future)
class _AllCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""

    def __init__(self, num_pending_calls, stop_on_exception):
        # Number of futures that must still complete before the event
        # fires.
        self.num_pending_calls = num_pending_calls
        # True for FIRST_EXCEPTION: an exception fires the event
        # immediately instead of decrementing the counter.
        self.stop_on_exception = stop_on_exception
        # Guards num_pending_calls; the add_* methods run on worker
        # threads.
        self.lock = threading.Lock()
        super().__init__()

    def _decrement_pending_calls(self):
        with self.lock:
            self.num_pending_calls -= 1
            if not self.num_pending_calls:
                # Everything we were waiting on has completed.
                self.event.set()

    def add_result(self, future):
        super().add_result(future)
        self._decrement_pending_calls()

    def add_exception(self, future):
        super().add_exception(future)
        if self.stop_on_exception:
            self.event.set()
        else:
            self._decrement_pending_calls()

    def add_cancelled(self, future):
        super().add_cancelled(future)
        self._decrement_pending_calls()
class _AcquireFutures(object):
    """A context manager that does an ordered acquire of Future conditions.

    Acquiring in a globally consistent order (sorted by id) prevents
    deadlock when multiple threads lock overlapping sets of futures.
    """

    def __init__(self, futures):
        self.futures = sorted(futures, key=id)

    def __enter__(self):
        for fut in self.futures:
            fut._condition.acquire()

    def __exit__(self, *args):
        for fut in self.futures:
            fut._condition.release()
def _create_and_install_waiters(fs, return_when):
    """Build the waiter matching *return_when* and attach it to every future."""
    if return_when == _AS_COMPLETED:
        waiter = _AsCompletedWaiter()
    elif return_when == FIRST_COMPLETED:
        waiter = _FirstCompletedWaiter()
    elif return_when in (FIRST_EXCEPTION, ALL_COMPLETED):
        # The countdown starts at the number of not-yet-finished futures.
        unfinished = sum(f._state not in [CANCELLED_AND_NOTIFIED, FINISHED]
                         for f in fs)
        waiter = _AllCompletedWaiter(
            unfinished,
            stop_on_exception=(return_when == FIRST_EXCEPTION))
    else:
        raise ValueError("Invalid return condition: %r" % return_when)

    for f in fs:
        f._waiters.append(waiter)

    return waiter
def as_completed(fs, timeout=None):
    """An iterator over the given futures that yields each as it completes.

    Args:
        fs: The sequence of Futures (possibly created by different Executors) to
            iterate over.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.

    Returns:
        An iterator that yields the given Futures as they complete (finished or
        cancelled). If any given Futures are duplicated, they will be returned
        once.

    Raises:
        TimeoutError: If the entire result iterator could not be generated
            before the given timeout.
    """
    if timeout is not None:
        end_time = timeout + time.time()

    # De-duplicate so each future is yielded at most once.
    fs = set(fs)
    # Snapshot state and install our waiter while every future's
    # condition is held, so nothing changes state in between.
    with _AcquireFutures(fs):
        finished = set(
            f for f in fs
            if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        pending = fs - finished
        waiter = _create_and_install_waiters(fs, _AS_COMPLETED)

    try:
        yield from finished
        while pending:
            if timeout is None:
                wait_timeout = None
            else:
                wait_timeout = end_time - time.time()
                if wait_timeout < 0:
                    raise TimeoutError(
                        '%d (of %d) futures unfinished' % (
                            len(pending), len(fs)))

            waiter.event.wait(wait_timeout)

            # Atomically take ownership of the batch of futures that
            # completed since the last wake-up.
            with waiter.lock:
                finished = waiter.finished_futures
                waiter.finished_futures = []
                waiter.event.clear()

            for future in finished:
                yield future
                pending.remove(future)
    finally:
        # Always uninstall our waiter, even if the caller abandons the
        # generator early or a timeout fires.
        for f in fs:
            with f._condition:
                f._waiters.remove(waiter)
# Return type of wait(): the futures that finished before the wait
# completed, and those that did not.
DoneAndNotDoneFutures = collections.namedtuple(
    'DoneAndNotDoneFutures', ['done', 'not_done'])
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the futures in the given sequence to complete.

    Args:
        fs: The sequence of Futures (possibly created by different Executors) to
            wait upon.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
        return_when: Indicates when this function should return. The options
            are:

            FIRST_COMPLETED - Return when any future finishes or is
                              cancelled.
            FIRST_EXCEPTION - Return when any future finishes by raising an
                              exception. If no future raises an exception
                              then it is equivalent to ALL_COMPLETED.
            ALL_COMPLETED - Return when all futures finish or are cancelled.

    Returns:
        A named 2-tuple of sets. The first set, named 'done', contains the
        futures that completed (is finished or cancelled) before the wait
        completed. The second set, named 'not_done', contains uncompleted
        futures.
    """
    # Hold every future's condition while snapshotting state and
    # installing the waiter, so no completion can slip through between
    # the early-exit checks and the wait below.
    with _AcquireFutures(fs):
        done = set(f for f in fs
                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        not_done = set(fs) - done

        if (return_when == FIRST_COMPLETED) and done:
            # Something is already done; no need to block at all.
            return DoneAndNotDoneFutures(done, not_done)
        elif (return_when == FIRST_EXCEPTION) and done:
            if any(f for f in done
                   if not f.cancelled() and f.exception() is not None):
                return DoneAndNotDoneFutures(done, not_done)

        if len(done) == len(fs):
            return DoneAndNotDoneFutures(done, not_done)

        waiter = _create_and_install_waiters(fs, return_when)

    waiter.event.wait(timeout)
    # Uninstall the waiter before reading its finished list.
    for f in fs:
        with f._condition:
            f._waiters.remove(waiter)

    done.update(waiter.finished_futures)
    return DoneAndNotDoneFutures(done, set(fs) - done)
class Future(object):
    """Represents the result of an asynchronous computation."""

    def __init__(self):
        """Initializes the future. Should not be called by clients."""
        # _condition guards all other attributes; every state change
        # happens while it is held, and result()/exception() block on it.
        self._condition = threading.Condition()
        self._state = PENDING
        self._result = None
        self._exception = None
        # _Waiter objects installed by wait()/as_completed().
        self._waiters = []
        self._done_callbacks = []

    def _invoke_callbacks(self):
        # Callbacks run outside _condition; a failing callback is logged
        # rather than propagated so one bad callback cannot break the
        # others or the calling (worker) thread.
        for callback in self._done_callbacks:
            try:
                callback(self)
            except Exception:
                LOGGER.exception('exception calling callback for %r', self)

    def __repr__(self):
        with self._condition:
            if self._state == FINISHED:
                if self._exception:
                    return '<Future at %s state=%s raised %s>' % (
                        hex(id(self)),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._exception.__class__.__name__)
                else:
                    return '<Future at %s state=%s returned %s>' % (
                        hex(id(self)),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._result.__class__.__name__)
            return '<Future at %s state=%s>' % (
                hex(id(self)),
                _STATE_TO_DESCRIPTION_MAP[self._state])

    def cancel(self):
        """Cancel the future if possible.

        Returns True if the future was cancelled, False otherwise. A future
        cannot be cancelled if it is running or has already completed.
        """
        with self._condition:
            if self._state in [RUNNING, FINISHED]:
                return False

            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                # Already cancelled; idempotent success.
                return True

            self._state = CANCELLED
            self._condition.notify_all()

        # Callbacks run after releasing _condition.
        self._invoke_callbacks()
        return True

    def cancelled(self):
        """Return True if the future was cancelled."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]

    def running(self):
        """Return True if the future is currently executing."""
        with self._condition:
            return self._state == RUNNING

    def done(self):
        """Return True of the future was cancelled or finished executing."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]

    def __get_result(self):
        # Caller must hold _condition and have verified state FINISHED.
        if self._exception:
            raise self._exception
        else:
            return self._result

    def add_done_callback(self, fn):
        """Attaches a callable that will be called when the future finishes.

        Args:
            fn: A callable that will be called with this future as its only
                argument when the future completes or is cancelled. The callable
                will always be called by a thread in the same process in which
                it was added. If the future has already completed or been
                cancelled then the callable will be called immediately. These
                callables are called in the order that they were added.
        """
        with self._condition:
            if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
                self._done_callbacks.append(fn)
                return
        # Already done: invoke immediately, outside _condition.
        fn(self)

    def result(self, timeout=None):
        """Return the result of the call that the future represents.

        Args:
            timeout: The number of seconds to wait for the result if the future
                isn't done. If None, then there is no limit on the wait time.

        Returns:
            The result of the call that the future represents.

        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the given
                timeout.
            Exception: If the call raised then that exception will be raised.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()

            self._condition.wait(timeout)

            # Re-check after the wait: we may have been woken by
            # completion/cancellation, or simply timed out.
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()
            else:
                raise TimeoutError()

    def exception(self, timeout=None):
        """Return the exception raised by the call that the future represents.

        Args:
            timeout: The number of seconds to wait for the exception if the
                future isn't done. If None, then there is no limit on the wait
                time.

        Returns:
            The exception raised by the call that the future represents or None
            if the call completed without raising.

        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the given
                timeout.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception

            self._condition.wait(timeout)

            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception
            else:
                raise TimeoutError()

    # The following methods should only be used by Executors and in tests.
    def set_running_or_notify_cancel(self):
        """Mark the future as running or process any cancel notifications.

        Should only be used by Executor implementations and unit tests.

        If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (though calls
        to as_completed() or wait()) are notified and False is returned.

        If the future was not cancelled then it is put in the running state
        (future calls to running() will return True) and True is returned.

        This method should be called by Executor implementations before
        executing the work associated with this future. If this method returns
        False then the work should not be executed.

        Returns:
            False if the Future was cancelled, True otherwise.

        Raises:
            RuntimeError: if this method was already called or if set_result()
                or set_exception() was called.
        """
        with self._condition:
            if self._state == CANCELLED:
                self._state = CANCELLED_AND_NOTIFIED
                for waiter in self._waiters:
                    waiter.add_cancelled(self)
                # self._condition.notify_all() is not necessary because
                # self.cancel() triggers a notification.
                return False
            elif self._state == PENDING:
                self._state = RUNNING
                return True
            else:
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
                raise RuntimeError('Future in unexpected state')

    def set_result(self, result):
        """Sets the return value of work associated with the future.

        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._result = result
            self._state = FINISHED
            # Notify waiters before blocked result()/exception() callers.
            for waiter in self._waiters:
                waiter.add_result(self)
            self._condition.notify_all()
        self._invoke_callbacks()

    def set_exception(self, exception):
        """Sets the result of the future as being the given exception.

        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._exception = exception
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_exception(self)
            self._condition.notify_all()
        self._invoke_callbacks()
class Executor(object):
    """This is an abstract base class for concrete asynchronous executors."""

    def submit(self, fn, *args, **kwargs):
        """Submits a callable to be executed with the given arguments.

        Schedules the callable to be executed as fn(*args, **kwargs) and
        returns a Future instance representing the execution of the
        callable.

        Returns:
            A Future representing the given call.
        """
        raise NotImplementedError()

    def map(self, fn, *iterables, timeout=None):
        """Returns a iterator equivalent to map(fn, iter).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.

        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        end_time = None if timeout is None else timeout + time.time()

        # Submit everything up front. The yields live in a nested
        # generator so that all submissions happen before the caller
        # pulls the first result.
        futures = [self.submit(fn, *args) for args in zip(*iterables)]

        def _result_iterator():
            try:
                for fut in futures:
                    if end_time is None:
                        yield fut.result()
                    else:
                        yield fut.result(end_time - time.time())
            finally:
                # Cancel whatever has not run if the caller stops
                # iterating early or a result raised.
                for fut in futures:
                    fut.cancel()

        return _result_iterator()

    def shutdown(self, wait=True):
        """Clean-up the resources associated with the Executor.

        It is safe to call this method several times. Otherwise, no other
        methods can be called after this one.

        Args:
            wait: If True then shutdown will not return until all running
                futures have finished executing and the resources used by the
                executor have been reclaimed.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown(wait=True)
        return False
| Orav/kbengine | kbe/src/lib/python/Lib/concurrent/futures/_base.py | Python | lgpl-3.0 | 20,213 | [
"Brian"
] | 36bfef7bd2742fe718326af6fb5a3636caea0667b66d27435a829e3862d255f5 |
class Solution(object):
    def findItinerary(self, tickets):
        """Reconstruct the lexicographically-smallest itinerary using
        every ticket exactly once, starting from 'JFK'.

        Uses an iterative Hierholzer Eulerian-path walk: at each airport
        we always take the smallest unused destination; dead ends are
        appended to the route, which is finally reversed. Iterative
        (explicit stack) rather than recursive, so very long itineraries
        cannot exceed Python's recursion limit.

        :type tickets: List[List[str]]
        :rtype: List[str]
        """
        from collections import defaultdict
        # Reverse-sorted so that list.pop() yields the smallest
        # destination first.
        graph = defaultdict(list)
        for src, dst in sorted(tickets, reverse=True):
            graph[src].append(dst)

        route = []
        stack = ['JFK']
        while stack:
            # Walk greedily until we hit an airport with no unused
            # outgoing tickets...
            while graph[stack[-1]]:
                stack.append(graph[stack[-1]].pop())
            # ...then retire it onto the (reversed) route.
            route.append(stack.pop())

        return route[::-1]
| ChuanleiGuo/AlgorithmsPlayground | LeetCodeSolutions/python/332_Reconstruct_Itinerary.py | Python | mit | 518 | [
"VisIt"
] | d644880fb87ae5a7e4c9ccc51b628e4a7df9bd757a463aaea194838910747b83 |
import numpy as np
import unittest
import discretize
from SimPEG.maps import Wires
from SimPEG.utils import (
mkvc,
WeightedGaussianMixture,
GaussianMixtureWithPrior,
)
from scipy.stats import norm, multivariate_normal
class TestGMMs(unittest.TestCase):
    """Tests for SimPEG's cell-volume-weighted GMM utilities.

    A synthetic 2-component, 2D Gaussian mixture is sampled in setUp();
    the tests check that WeightedGaussianMixture and
    GaussianMixtureWithPrior recover the volume-weighted statistics of
    those samples.
    """

    def setUp(self):
        # Fixed seed so the sampled mixture (and the tolerance-based
        # checks below) are reproducible.
        np.random.seed(518936)
        # Create a cloud of random points from a random gaussian mixture
        self.ndim = 2
        self.n_components = 2
        sigma = np.random.randn(self.n_components, self.ndim, self.ndim)
        # sigma.dot(sigma.T) makes each component covariance symmetric
        # PSD; the additions below make them distinct and well-conditioned.
        sigma = np.c_[[sigma[i].dot(sigma[i].T) for i in range(sigma.shape[0])]]
        sigma[0] += np.eye(self.ndim)
        sigma[1] += np.eye(self.ndim) - 0.25 * np.eye(self.ndim).transpose((1, 0))
        self.sigma = sigma
        # Well-separated means, scaled around +100 and -100.
        self.means = (
            np.abs(np.random.randn(self.ndim, self.ndim)) * np.c_[[100.0, -100.0]]
        )
        self.rv0 = multivariate_normal(self.means[0], self.sigma[0])
        self.rv1 = multivariate_normal(self.means[1], self.sigma[1])
        self.proportions = np.r_[0.6, 0.4]
        self.nsample = 1000
        self.s0 = self.rv0.rvs(int(self.nsample * self.proportions[0]))
        self.s1 = self.rv1.rvs(int(self.nsample * self.proportions[1]))
        self.samples = np.r_[self.s0, self.s1]
        self.model = mkvc(self.samples)
        # 1D mesh whose random positive cell volumes act as weights.
        self.mesh = discretize.TensorMesh(
            [np.maximum(1e-1, np.random.randn(self.nsample) ** 2.0)]
        )
        self.wires = Wires(("s0", self.mesh.nC), ("s1", self.mesh.nC))
        self.PlotIt = False

    def test_weighted_gaussian_mixture_multicomponents_multidimensions(self):
        # Fit a volume-weighted GMM initialized at the true parameters,
        # then compare against directly computed weighted statistics.
        clf = WeightedGaussianMixture(
            mesh=self.mesh,
            n_components=self.n_components,
            covariance_type="full",
            max_iter=1000,
            n_init=20,
            tol=1e-8,
            means_init=self.means,
            warm_start=True,
            precisions_init=np.linalg.inv(self.sigma),
            weights_init=self.proportions,
        )
        clf.fit(self.samples)

        # Reference statistics: volume-weighted mean/cov per component.
        checking_means = np.c_[
            np.average(
                self.s0, axis=0, weights=self.mesh.cell_volumes[: self.s0.shape[0]]
            ),
            np.average(
                self.s1, axis=0, weights=self.mesh.cell_volumes[self.s0.shape[0] :]
            ),
        ].T
        checking_covariances = np.r_[
            np.cov(
                self.s0.T, ddof=0, aweights=self.mesh.cell_volumes[: self.s0.shape[0]]
            ),
            np.cov(
                self.s1.T, ddof=0, aweights=self.mesh.cell_volumes[self.s0.shape[0] :]
            ),
        ].reshape(clf.covariances_.shape)
        checking_proportions = np.r_[
            self.mesh.cell_volumes[: self.s0.shape[0]].sum(),
            self.mesh.cell_volumes[self.s0.shape[0] :].sum(),
        ]
        checking_proportions /= checking_proportions.sum()
        self.assertTrue(np.all(np.isclose(clf.means_, checking_means)))
        self.assertTrue(np.all(np.isclose(clf.covariances_, checking_covariances)))
        self.assertTrue(np.all(np.isclose(clf.weights_, checking_proportions)))
        print(
            "WeightedGaussianMixture is estimating correctly in 2D with 2 components."
        )

    def test_weighted_gaussian_mixture_one_component_1d(self):
        # Degenerate case: one component in 1D should reduce to the
        # weighted mean and weighted (biased) variance.
        model1d = self.wires.s0 * self.model
        clf = WeightedGaussianMixture(
            mesh=self.mesh,
            n_components=1,
            covariance_type="full",
            max_iter=1000,
            n_init=10,
            tol=1e-8,
            warm_start=True,
        )
        clf.fit(model1d.reshape(-1, 1))
        cheching_mean = np.average(model1d, weights=self.mesh.cell_volumes)
        checking_covariance = np.cov(model1d, ddof=0, aweights=self.mesh.cell_volumes)
        self.assertTrue(np.isclose(clf.means_[0], cheching_mean))
        self.assertTrue(np.isclose(clf.covariances_[0], checking_covariance))
        print("WeightedGaussianMixture is estimating correctly in 1D with 1 component.")

    def test_MAP_estimate_one_component_1d(self):
        # subsample mesh and model between mle and prior
        n_samples = int(self.nsample * self.proportions.min())
        model_map = self.wires.s0 * self.model
        model_mle = model_map[:n_samples]
        model_prior = model_map[-n_samples:]
        actv = np.zeros(self.mesh.nC, dtype="bool")
        actv[:n_samples] = np.ones(n_samples, dtype="bool")

        # Prior GMM fit on the "prior" half of the data.
        clfref = WeightedGaussianMixture(
            mesh=self.mesh,
            actv=actv,
            n_components=1,
            covariance_type="full",
            max_iter=1000,
            n_init=10,
            tol=1e-8,
            warm_start=True,
        )
        clfref.fit(model_prior.reshape(-1, 1))

        # Full MAP update: with nu=kappa=zeta=1 the posterior should be
        # the evenly weighted combination of MLE and prior statistics.
        clf = GaussianMixtureWithPrior(
            gmmref=clfref,
            max_iter=1000,
            n_init=10,
            tol=1e-8,
            warm_start=True,
            nu=1,
            kappa=1,
            zeta=1,
            prior_type="full",
            update_covariances=True,
        )
        clf.fit(model_mle.reshape(-1, 1))
        checking_means = np.average(
            np.r_[model_mle, model_prior],
            weights=np.r_[self.mesh.cell_volumes[actv], self.mesh.cell_volumes[actv]],
        )
        checking_covariance = np.cov(
            np.r_[model_mle, model_prior],
            ddof=0,
            aweights=np.r_[self.mesh.cell_volumes[actv], self.mesh.cell_volumes[actv]],
        )
        self.assertTrue(np.isclose(checking_covariance, clf.covariances_))
        self.assertTrue(np.isclose(checking_means, clf.means_))
        print(
            "GaussianMixtureWithPrior is fully-MAP-estimating correctly in 1D with 1 component."
        )

        # Semi-conjugate MAP update: covariances average instead of
        # pooling the samples.
        clfsemi = GaussianMixtureWithPrior(
            gmmref=clfref,
            max_iter=1000,
            n_init=10,
            tol=1e-8,
            warm_start=True,
            nu=1,
            kappa=1,
            zeta=1,
            prior_type="semi",
            update_covariances=True,
        )
        clfsemi.fit(model_mle.reshape(-1, 1))
        checking_means_semi = np.average(
            np.r_[model_mle, model_prior],
            weights=np.r_[self.mesh.cell_volumes[actv], self.mesh.cell_volumes[actv]],
        )
        checking_covariance_semi = 0.5 * np.cov(
            model_mle, ddof=0, aweights=self.mesh.cell_volumes[actv]
        ) + 0.5 * np.cov(model_prior, ddof=0, aweights=self.mesh.cell_volumes[actv])
        self.assertTrue(np.isclose(checking_covariance_semi, clfsemi.covariances_))
        self.assertTrue(np.isclose(checking_means_semi, clfsemi.means_))
        print(
            "GaussianMixtureWithPrior is semi-MAP-estimating correctly in 1D with 1 component."
        )

    def test_MAP_estimate_multi_component_multidimensions(self):
        # prior model at three-quarter-way the means and identity covariances
        model_prior = (
            np.random.randn(*self.samples.shape)
            + 0.9 * self.means[np.random.choice(2, size=self.nsample, p=[0.9, 0.1])]
        )
        clfref = WeightedGaussianMixture(
            mesh=self.mesh,
            n_components=self.n_components,
            covariance_type="full",
            max_iter=1000,
            n_init=10,
            tol=1e-8,
            warm_start=True,
        )
        clfref.fit(model_prior)
        # Align component ordering so the prior and MLE components match.
        clfref.order_clusters_GM_weight()

        clf = GaussianMixtureWithPrior(
            gmmref=clfref,
            max_iter=1000,
            n_init=100,
            tol=1e-10,
            nu=1,
            kappa=1,
            zeta=1,
            prior_type="semi",
            update_covariances=True,
        )
        clf.fit(self.samples)

        # This is a rough estimate of the multidimensional, multi-components means
        checking_means = np.c_[
            (
                clf.weights_[0]
                * np.average(
                    self.s0, axis=0, weights=self.mesh.cell_volumes[: self.s0.shape[0]]
                )
                + clfref.weights_[0] * clfref.means_[0]
            )
            / (clf.weights_[0] + clfref.weights_[0]),
            (
                clf.weights_[1]
                * np.average(
                    self.s1, axis=0, weights=self.mesh.cell_volumes[self.s0.shape[0] :]
                )
                + clfref.weights_[1] * clfref.means_[1]
            )
            / (clf.weights_[1] + clfref.weights_[1]),
        ].T
        self.assertTrue(np.all(np.isclose(checking_means, clf.means_, rtol=1e-2)))

        # This is a rough estimate of the multidimensional, multi-components covariances_
        checking_covariances = np.r_[
            (
                clf.weights_[0]
                * np.cov(
                    self.s0.T,
                    ddof=0,
                    aweights=self.mesh.cell_volumes[: self.s0.shape[0]],
                )
                + clfref.weights_[0] * clfref.covariances_[0]
            )
            / (clf.weights_[0] + clfref.weights_[0]),
            (
                clf.weights_[1]
                * np.cov(
                    self.s1.T,
                    ddof=0,
                    aweights=self.mesh.cell_volumes[self.s0.shape[0] :],
                )
                + clfref.weights_[1] * clfref.covariances_[1]
            )
            / (clf.weights_[1] + clfref.weights_[1]),
        ].reshape(clf.covariances_.shape)
        self.assertTrue(
            np.all(np.isclose(checking_covariances, clf.covariances_, rtol=0.15))
        )

        # NOTE(review): the stray unary '+' ("+ +clfref...") below is
        # harmless but looks like a typo.
        checking_proportions = np.r_[
            self.mesh.cell_volumes[: self.s0.shape[0]].sum()
            + clfref.weights_[0] * self.mesh.cell_volumes.sum(),
            self.mesh.cell_volumes[self.s0.shape[0] :].sum()
            + +clfref.weights_[1] * self.mesh.cell_volumes.sum(),
        ]
        checking_proportions /= checking_proportions.sum()
        self.assertTrue(np.all(np.isclose(checking_proportions, clf.weights_)))
        print(
            "GaussianMixtureWithPrior is semi-MAP-estimating correctly in 2D with 2 components."
        )
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| simpeg/simpeg | tests/utils/test_gmm_utils.py | Python | mit | 10,138 | [
"Gaussian"
] | 78c6848548b0d79953819d34346142e6f79b7a7e3d5a1c8032b87a0c8b3c3826 |
# coding: utf-8
"""Unit tests for sending e-mails."""
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.conf.urls import include, url
from django.contrib.auth.models import User
from django.core import mail
from django.core.urlresolvers import clear_url_caches, reverse
from django.test.utils import override_settings
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.six.moves import range
from djblets.mail.testing import DmarcDnsTestsMixin
from djblets.mail.utils import (build_email_address,
build_email_address_for_user)
from djblets.siteconfig.models import SiteConfiguration
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.accounts.models import ReviewRequestVisit
from reviewboard.admin.server import build_server_url, get_server_url
from reviewboard.admin.siteconfig import load_site_config, settings_map
from reviewboard.diffviewer.models import FileDiff
from reviewboard.notifications.email.message import \
prepare_base_review_request_mail
from reviewboard.notifications.email.utils import (
get_email_addresses_for_group,
send_email)
from reviewboard.reviews.models import (Group,
Review,
ReviewRequest,
ReviewRequestDraft)
from reviewboard.scmtools.core import PRE_CREATION
from reviewboard.site.models import LocalSite
from reviewboard.testing import TestCase
from reviewboard.webapi.models import WebAPIToken
# Dotted path to Django's console e-mail backend (prints mail to stdout
# instead of sending it).
_CONSOLE_EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# URLconf used via ROOT_URLCONF overrides by SiteRootURLTestsMixin:
# mounts all of Review Board under a custom site root so URL generation
# in e-mails can be checked.
urlpatterns = [
    url(r'^site-root/', include('reviewboard.urls')),
]
class SiteRootURLTestsMixin(object):
    """A mixin for TestCases that helps test URLs generated with site roots.

    This mixin provides some settings for unit tests to help ensure that URLs
    generated in e-mails are done so correctly and to test that the site root
    is only present in those e-mails once.

    .. seealso:: `Bug 4612`_

    .. _Bug 4612: https://reviews.reviewboard.org/r/9448/bugs/4612/
    """

    # A non-default site root to serve Review Board under.
    CUSTOM_SITE_ROOT = '/site-root/'

    # The malformed, doubled prefix that Bug 4612 produced.
    BAD_SITE_ROOT = '/site-root//site-root/'

    # Settings overrides applying CUSTOM_SITE_ROOT, using this module's
    # urlpatterns as the root URLconf.
    CUSTOM_SITE_ROOT_SETTINGS = {
        'SITE_ROOT': '/site-root/',
        'ROOT_URLCONF': 'reviewboard.notifications.tests.test_email_sending',
    }

    @classmethod
    def setUpClass(cls):
        super(SiteRootURLTestsMixin, cls).setUpClass()

        # Cached URL resolvers would otherwise leak a previous SITE_ROOT
        # between test classes.
        clear_url_caches()

    def tearDown(self):
        super(SiteRootURLTestsMixin, self).tearDown()

        clear_url_caches()
class EmailTestHelper(object):
    """Mixin applying e-mail siteconfig settings around each test.

    Subclasses set :py:attr:`email_siteconfig_settings` to the
    siteconfig values they need. setUp() applies them (remembering the
    original values) and tearDown() restores the originals, reloading
    the site configuration whenever a changed key maps to a Django
    setting.
    """

    # Siteconfig settings to apply for the duration of each test.
    email_siteconfig_settings = {}

    def setUp(self):
        super(EmailTestHelper, self).setUp()

        mail.outbox = []
        self.sender = 'noreply@example.com'

        self._old_email_settings = {}

        if self.email_siteconfig_settings:
            siteconfig = SiteConfiguration.objects.get_current()
            needs_reload = False

            for key, value in six.iteritems(self.email_siteconfig_settings):
                old_value = siteconfig.get(key)

                if old_value != value:
                    # Remember the original so tearDown() can restore it.
                    self._old_email_settings[key] = old_value
                    siteconfig.set(key, value)

                    if key in settings_map:
                        needs_reload = True

            if self._old_email_settings:
                siteconfig.save()

                if needs_reload:
                    load_site_config()

    def tearDown(self):
        super(EmailTestHelper, self).tearDown()

        if self._old_email_settings:
            siteconfig = SiteConfiguration.objects.get_current()
            needs_reload = False

            # Restore every saved setting. (The old implementation also
            # wrote the current value back into _old_email_settings
            # here, which served no purpose; that dead mutation has been
            # removed.)
            for key, value in six.iteritems(self._old_email_settings):
                siteconfig.set(key, value)

                if key in settings_map:
                    needs_reload = True

            siteconfig.save()

            if needs_reload:
                load_site_config()

    def assertValidRecipients(self, user_list, group_list=[]):
        """Assert the last sent e-mail went to exactly these recipients."""
        recipient_list = mail.outbox[0].to + mail.outbox[0].cc
        self.assertEqual(len(recipient_list), len(user_list) + len(group_list))

        for user in user_list:
            self.assertTrue(build_email_address_for_user(
                User.objects.get(username=user)) in recipient_list,
                "user %s was not found in the recipient list" % user)

        groups = Group.objects.filter(name__in=group_list, local_site=None)

        for group in groups:
            for address in get_email_addresses_for_group(group):
                self.assertTrue(
                    address in recipient_list,
                    "group %s was not found in the recipient list" % address)
class UserEmailTestsMixin(EmailTestHelper):
    """A mixin for user-related e-mail tests."""

    email_siteconfig_settings = {
        'mail_send_new_user_mail': True,
    }

    def _register(self, username='NewUser', password1='password',
                  password2='password', email='newuser@example.com',
                  first_name='New', last_name='User'):
        """Register a new user account through the registration view."""
        register_url = reverse('register')

        # GET the registration page first so the CSRF cookie is set
        # before the POST.
        self.client.get(register_url)
        self.client.post(register_url, {
            'username': username,
            'password1': password1,
            'password2': password2,
            'email': email,
            'first_name': first_name,
            'last_name': last_name,
        })
class UserEmailTests(UserEmailTestsMixin, TestCase):
    """User e-mail tests."""

    def test_new_user_email(self):
        """Testing sending an e-mail after a new user has successfully
        registered
        """
        self._register()

        siteconfig = SiteConfiguration.objects.get_current()
        admin_name = siteconfig.get('site_admin_name')
        admin_email_addr = siteconfig.get('site_admin_email')

        # Exactly one notification should be queued, sent from the
        # configured sender and addressed to the site administrator.
        self.assertEqual(len(mail.outbox), 1)

        email = mail.outbox[0]
        self.assertEqual(email.subject,
                         "New Review Board user registration for NewUser")

        self.assertEqual(email.from_email, self.sender)
        self.assertEqual(email.extra_headers['From'],
                         settings.DEFAULT_FROM_EMAIL)
        self.assertEqual(email.to[0],
                         build_email_address(full_name=admin_name,
                                             email=admin_email_addr))
class UserEmailSiteRootURLTests(SiteRootURLTestsMixin, UserEmailTestsMixin,
                                TestCase):
    """Tests for Bug 4612 related to user e-mails.

    User account e-mails do not include anything with a Local Site, so there
    is no reason to tests the Local Site case.
    """

    @override_settings(**SiteRootURLTestsMixin.CUSTOM_SITE_ROOT_SETTINGS)
    def test_new_user_email_site_root_custom(self):
        """Testing new user e-mail includes site root in e-mails only once with
        custom site root
        """
        self._register()

        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[0]

        # The doubled site root (Bug 4612) must not appear in the
        # plain-text body or in any alternative (e.g. HTML) part.
        self.assertNotIn(self.BAD_SITE_ROOT, email.body)

        for alternative in email.alternatives:
            self.assertNotIn(self.BAD_SITE_ROOT, alternative[0])

    def test_new_user_email_site_root_default(self):
        """Testing new user e-mail includes site root in e-mails only once with
        default site root
        """
        self._register()

        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]

        # With the default '/' site root, a doubled slash after the
        # domain would indicate the same duplication bug.
        self.assertNotIn('example.com//', message.body)

        for alternative in message.alternatives:
            self.assertNotIn('example.com//', alternative[0])
class ReviewRequestEmailTestsMixin(EmailTestHelper):
    """A mixin for review request-related and review-related e-mail tests."""

    fixtures = ['test_users']

    # Enable review e-mails with a fixed sender address and "smart"
    # From-header spoofing for every test using this mixin.
    email_siteconfig_settings = {
        'mail_send_review_mail': True,
        'mail_default_from': 'noreply@example.com',
        'mail_from_spoofing': 'smart',
    }
class ReviewRequestEmailTests(ReviewRequestEmailTestsMixin, DmarcDnsTestsMixin,
SpyAgency, TestCase):
"""Tests for review and review request e-mails."""
def test_new_review_request_email(self):
"""Testing sending an e-mail when creating a new review request"""
review_request = self.create_review_request(
summary='My test review request')
review_request.target_people.add(User.objects.get(username='grumpy'))
review_request.target_people.add(User.objects.get(username='doc'))
review_request.publish(review_request.submitter)
from_email = build_email_address_for_user(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, self.sender)
self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
self.assertEqual(mail.outbox[0].subject,
'Review Request %s: My test review request'
% review_request.pk)
self.assertValidRecipients(['grumpy', 'doc'])
message = mail.outbox[0].message()
self.assertEqual(message['Sender'],
self._get_sender(review_request.submitter))
def test_new_review_request_with_from_spoofing_always(self):
"""Testing sending an e-mail when creating a new review request with
mail_from_spoofing=always
"""
self.dmarc_txt_records['_dmarc.example.com'] = 'v=DMARC1; p=reject;'
review_request = self.create_review_request(
summary='My test review request')
review_request.target_people.add(
*User.objects.filter(username__in=('doc', 'grumpy')))
settings = {
'mail_from_spoofing': 'always',
}
with self.siteconfig_settings(settings):
review_request.publish(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, self.sender)
self.assertEqual(mail.outbox[0].extra_headers['From'],
'Doc Dwarf <doc@example.com>')
self.assertEqual(mail.outbox[0].subject,
'Review Request %s: My test review request'
% review_request.pk)
self.assertValidRecipients(['grumpy', 'doc'])
message = mail.outbox[0].message()
self.assertEqual(message['Sender'],
self._get_sender(review_request.submitter))
def test_new_review_request_with_from_spoofing_never(self):
"""Testing sending an e-mail when creating a new review request with
mail_from_spoofing=never
"""
review_request = self.create_review_request(
summary='My test review request')
review_request.target_people.add(
*User.objects.filter(username__in=('doc', 'grumpy')))
settings = {
'mail_from_spoofing': 'never',
}
with self.siteconfig_settings(settings):
review_request.publish(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, self.sender)
self.assertEqual(mail.outbox[0].extra_headers['From'],
'Doc Dwarf via Review Board <noreply@example.com>')
self.assertEqual(mail.outbox[0].subject,
'Review Request %s: My test review request'
% review_request.pk)
self.assertValidRecipients(['grumpy', 'doc'])
message = mail.outbox[0].message()
self.assertEqual(message['Sender'],
self._get_sender(review_request.submitter))
def test_new_review_request_email_with_from_spoofing_auto(self):
    """Testing sending an e-mail when creating a new review request with
    mail_from_spoofing=auto and allowed by DMARC
    """
    # p=none means DMARC would not reject a spoofed From, so the
    # automatic mode should spoof the submitter's address.
    self.dmarc_txt_records['_dmarc.example.com'] = 'v=DMARC1; p=none;'

    review_request = self.create_review_request(
        summary='My test review request')
    review_request.target_people.add(
        *User.objects.filter(username__in=('doc', 'grumpy')))
    review_request.publish(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'],
                     'Doc Dwarf <doc@example.com>')
    self.assertEqual(mail.outbox[0].subject,
                     'Review Request %s: My test review request'
                     % review_request.pk)
    self.assertValidRecipients(['grumpy', 'doc'])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'],
                     self._get_sender(review_request.submitter))
def test_new_review_request_email_with_from_spoofing_auto_dmarc_deny(self):
    """Testing sending an e-mail when creating a new review request with
    mail_from_spoofing=auto and denied by DMARC
    """
    # p=reject means DMARC would bounce a spoofed From, so automatic
    # mode must fall back to the noreply address.
    self.dmarc_txt_records['_dmarc.example.com'] = 'v=DMARC1; p=reject;'

    review_request = self.create_review_request(
        summary='My test review request')
    review_request.target_people.add(
        *User.objects.filter(username__in=('doc', 'grumpy')))
    review_request.publish(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'],
                     'Doc Dwarf via Review Board <noreply@example.com>')
    self.assertEqual(mail.outbox[0].subject,
                     'Review Request %s: My test review request'
                     % review_request.pk)
    self.assertValidRecipients(['grumpy', 'doc'])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'],
                     self._get_sender(review_request.submitter))
def test_review_request_email_local_site_group(self):
    """Testing sending email when a group member is part of a Local Site"""
    # This was bug 3581.
    site = LocalSite.objects.create(name=self.local_site_name)
    group = self.create_review_group()

    # Make 'grumpy' both a member and an admin of the Local Site, and a
    # member of the target review group.
    member = User.objects.get(username='grumpy')
    site.users.add(member)
    site.admins.add(member)
    site.save()

    group.users.add(member)
    group.save()

    review_request = self.create_review_request()
    review_request.target_groups.add(group)
    review_request.publish(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertValidRecipients(['doc', 'grumpy'])
def test_review_email(self):
    """Testing sending an e-mail when replying to a review request"""
    review_request = self.create_review_request(
        summary='My test review request')
    review_request.target_people.add(User.objects.get(username='grumpy'))
    review_request.target_people.add(User.objects.get(username='doc'))
    review_request.publish(review_request.submitter)

    # Clear the outbox so only the review's e-mail is inspected.
    mail.outbox = []

    review = self.create_review(review_request=review_request)
    review.publish()

    from_email = build_email_address_for_user(review.user)

    # The envelope sender stays fixed; the reviewer's address appears
    # only in the From header.
    self.assertEqual(len(mail.outbox), 1)
    email = mail.outbox[0]
    self.assertEqual(email.from_email, self.sender)
    self.assertEqual(email.extra_headers['From'], from_email)
    self.assertEqual(email._headers['X-ReviewBoard-URL'],
                     'http://example.com/')
    self.assertEqual(email._headers['X-ReviewRequest-URL'],
                     'http://example.com/r/%s/'
                     % review_request.display_id)
    self.assertEqual(email.subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.display_id)
    self.assertValidRecipients([
        review_request.submitter.username,
        'grumpy',
        'doc',
    ])

    message = email.message()
    self.assertEqual(message['Sender'], self._get_sender(review.user))
def test_review_email_with_from_spoofing_always(self):
    """Testing sending an e-mail when replying to a review request with
    mail_from_spoofing=always
    """
    # Even with a rejecting DMARC policy, "always" must spoof the
    # reviewer's address in the From header.
    self.dmarc_txt_records['_dmarc.example.com'] = 'v=DMARC1; p=reject;'

    review_request = self.create_review_request(
        summary='My test review request')
    review_request.target_people.add(
        *User.objects.filter(username__in=('doc', 'grumpy')))
    review_request.publish(review_request.submitter)

    review = self.create_review(review_request=review_request)

    # Clear the outbox so only the review's e-mail is inspected.
    mail.outbox = []

    settings = {
        'mail_from_spoofing': 'always',
    }

    with self.siteconfig_settings(settings):
        review.publish()

    self.assertEqual(len(mail.outbox), 1)
    email = mail.outbox[0]
    self.assertEqual(email.from_email, self.sender)
    self.assertEqual(email.extra_headers['From'],
                     'Dopey Dwarf <dopey@example.com>')
    self.assertEqual(email._headers['X-ReviewBoard-URL'],
                     'http://example.com/')
    self.assertEqual(email._headers['X-ReviewRequest-URL'],
                     'http://example.com/r/%s/'
                     % review_request.display_id)
    self.assertEqual(email.subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.display_id)
    self.assertValidRecipients([
        review_request.submitter.username,
        'grumpy',
        'doc',
    ])

    message = email.message()
    self.assertEqual(message['Sender'], self._get_sender(review.user))
def test_review_email_with_from_spoofing_never(self):
    """Testing sending an e-mail when replying to a review request with
    mail_from_spoofing=never
    """
    review_request = self.create_review_request(
        summary='My test review request')
    review_request.target_people.add(
        *User.objects.filter(username__in=('doc', 'grumpy')))
    review_request.publish(review_request.submitter)

    review = self.create_review(review_request=review_request)

    # Clear the outbox so only the review's e-mail is inspected.
    mail.outbox = []

    settings = {
        'mail_from_spoofing': 'never',
    }

    with self.siteconfig_settings(settings):
        review.publish()

    # With spoofing disabled, the From header must use the noreply
    # fallback instead of the reviewer's own address.
    self.assertEqual(len(mail.outbox), 1)
    email = mail.outbox[0]
    self.assertEqual(email.from_email, self.sender)
    self.assertEqual(email.extra_headers['From'],
                     'Dopey Dwarf via Review Board <noreply@example.com>')
    self.assertEqual(email._headers['X-ReviewBoard-URL'],
                     'http://example.com/')
    self.assertEqual(email._headers['X-ReviewRequest-URL'],
                     'http://example.com/r/%s/'
                     % review_request.display_id)
    self.assertEqual(email.subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.display_id)
    self.assertValidRecipients([
        review_request.submitter.username,
        'grumpy',
        'doc',
    ])

    message = email.message()
    self.assertEqual(message['Sender'], self._get_sender(review.user))
def test_review_email_with_from_spoofing_auto(self):
    """Testing sending an e-mail when replying to a review request with
    mail_from_spoofing=auto and allowed by DMARC
    """
    # p=none: DMARC permits spoofing, so auto mode uses the reviewer's
    # real address in the From header.
    self.dmarc_txt_records['_dmarc.example.com'] = 'v=DMARC1; p=none;'

    review_request = self.create_review_request(
        summary='My test review request')
    review_request.target_people.add(
        *User.objects.filter(username__in=('doc', 'grumpy')))
    review_request.publish(review_request.submitter)

    # Clear the outbox so only the review's e-mail is inspected.
    mail.outbox = []

    review = self.create_review(review_request=review_request)
    review.publish()

    self.assertEqual(len(mail.outbox), 1)
    email = mail.outbox[0]
    self.assertEqual(email.from_email, self.sender)
    self.assertEqual(email.extra_headers['From'],
                     'Dopey Dwarf <dopey@example.com>')
    self.assertEqual(email._headers['X-ReviewBoard-URL'],
                     'http://example.com/')
    self.assertEqual(email._headers['X-ReviewRequest-URL'],
                     'http://example.com/r/%s/'
                     % review_request.display_id)
    self.assertEqual(email.subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.display_id)
    self.assertValidRecipients([
        review_request.submitter.username,
        'grumpy',
        'doc',
    ])

    message = email.message()
    self.assertEqual(message['Sender'], self._get_sender(review.user))
def test_review_email_with_from_spoofing_auto_dmarc_deny(self):
    """Testing sending an e-mail when replying to a review request with
    mail_from_spoofing=auto and denied by DMARC
    """
    # p=reject: DMARC forbids spoofing, so auto mode falls back to the
    # noreply From address.
    self.dmarc_txt_records['_dmarc.example.com'] = 'v=DMARC1; p=reject;'

    review_request = self.create_review_request(
        summary='My test review request')
    review_request.target_people.add(
        *User.objects.filter(username__in=('doc', 'grumpy')))
    review_request.publish(review_request.submitter)

    # Clear the outbox so only the review's e-mail is inspected.
    mail.outbox = []

    review = self.create_review(review_request=review_request)
    review.publish()

    self.assertEqual(len(mail.outbox), 1)
    email = mail.outbox[0]
    self.assertEqual(email.from_email, self.sender)
    self.assertEqual(email.extra_headers['From'],
                     'Dopey Dwarf via Review Board <noreply@example.com>')
    self.assertEqual(email._headers['X-ReviewBoard-URL'],
                     'http://example.com/')
    self.assertEqual(email._headers['X-ReviewRequest-URL'],
                     'http://example.com/r/%s/'
                     % review_request.display_id)
    self.assertEqual(email.subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.display_id)
    self.assertValidRecipients([
        review_request.submitter.username,
        'grumpy',
        'doc',
    ])

    message = email.message()
    self.assertEqual(message['Sender'], self._get_sender(review.user))
@add_fixtures(['test_site'])
def test_review_email_with_site(self):
    """Testing sending an e-mail when replying to a review request
    on a Local Site
    """
    review_request = self.create_review_request(
        summary='My test review request',
        with_local_site=True)
    review_request.target_people.add(User.objects.get(username='grumpy'))
    review_request.target_people.add(User.objects.get(username='doc'))
    review_request.publish(review_request.submitter)

    # Ensure all the reviewers are on the site, or they'd be filtered
    # out of the recipient list.
    site = review_request.local_site
    site.users.add(*list(review_request.target_people.all()))

    # Clear the outbox so only the review's e-mail is inspected.
    mail.outbox = []

    review = self.create_review(review_request=review_request)
    review.publish()

    from_email = build_email_address_for_user(review.user)

    # The X-ReviewBoard-URL/X-ReviewRequest-URL headers must carry the
    # Local Site prefix in their paths.
    self.assertEqual(len(mail.outbox), 1)
    email = mail.outbox[0]
    self.assertEqual(email.from_email, self.sender)
    self.assertEqual(email.extra_headers['From'], from_email)
    self.assertEqual(email._headers['X-ReviewBoard-URL'],
                     'http://example.com/s/local-site-1/')
    self.assertEqual(email._headers['X-ReviewRequest-URL'],
                     'http://example.com/s/local-site-1/r/%s/'
                     % review_request.display_id)
    self.assertEqual(email.subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.display_id)
    self.assertValidRecipients([
        review_request.submitter.username,
        'grumpy',
        'doc',
    ])

    message = email.message()
    self.assertEqual(message['Sender'], self._get_sender(review.user))
def test_profile_should_send_email_setting(self):
    """Testing the Profile.should_send_email setting"""
    # Opt 'grumpy' out of e-mail notifications via his profile.
    opted_out = User.objects.get(username='grumpy')
    profile = opted_out.get_profile()
    profile.should_send_email = False
    profile.save(update_fields=('should_send_email',))

    review_request = self.create_review_request(
        summary='My test review request')
    review_request.target_people.add(
        opted_out,
        User.objects.get(username='doc'))
    review_request.publish(review_request.submitter)

    # Only 'doc' should remain on the recipient list.
    self.assertEqual(len(mail.outbox), 1)
    self.assertValidRecipients(['doc'])
def test_review_request_closed_no_email(self):
    """Tests e-mail is not generated when a review request is closed and
    e-mail setting is False
    """
    request = self.create_review_request()
    request.publish(request.submitter)

    # Discard the publish notification so only the close matters.
    mail.outbox = []

    request.close(ReviewRequest.SUBMITTED, request.submitter)

    # The close-mail option is off by default, so nothing is sent.
    self.assertEqual(len(mail.outbox), 0)
def test_review_request_closed_with_email(self):
    """Tests e-mail is generated when a review request is closed and
    e-mail setting is True
    """
    with self.siteconfig_settings({'mail_send_review_close_mail': True},
                                  reload_settings=False):
        review_request = self.create_review_request()
        review_request.publish(review_request.submitter)

        # Clear the outbox so only the close e-mail is inspected.
        mail.outbox = []

        review_request.close(ReviewRequest.SUBMITTED,
                             review_request.submitter)

        from_email = build_email_address_for_user(review_request.submitter)

        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].from_email, self.sender)
        self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)

        message = mail.outbox[0].message()
        self.assertTrue('This change has been marked as submitted'
                        in message.as_string())
def test_review_request_close_with_email_and_dmarc_deny(self):
    """Tests e-mail is generated when a review request is closed and
    e-mail setting is True and From spoofing blocked by DMARC
    """
    # The rejecting DMARC policy forces the noreply From fallback.
    self.dmarc_txt_records['_dmarc.example.com'] = 'v=DMARC1; p=reject;'

    with self.siteconfig_settings({'mail_send_review_close_mail': True},
                                  reload_settings=False):
        review_request = self.create_review_request()
        review_request.publish(review_request.submitter)

        # Clear the outbox so only the close e-mail is inspected.
        mail.outbox = []

        review_request.close(ReviewRequest.SUBMITTED,
                             review_request.submitter)

        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].from_email, self.sender)
        self.assertEqual(mail.outbox[0].extra_headers['From'],
                         'Doc Dwarf via Review Board '
                         '<noreply@example.com>')

        message = mail.outbox[0].message()
        self.assertTrue('This change has been marked as submitted'
                        in message.as_string())
def test_review_to_owner_only(self):
    """Test that e-mails from reviews published to the submitter only will
    only go to the submitter and the reviewer
    """
    review_request = self.create_review_request(public=True, publish=False)
    review_request.target_people.add(User.objects.get(username='grumpy'))

    review = self.create_review(review_request=review_request,
                                publish=False)

    with self.siteconfig_settings({'mail_send_review_mail': True},
                                  reload_settings=False):
        review.publish(to_owner_only=True)

    # Targeted reviewers must not be CC'd; only the review's author and
    # the review request's submitter receive the e-mail.
    self.assertEqual(len(mail.outbox), 1)

    message = mail.outbox[0]

    self.assertEqual(message.cc, [])
    self.assertEqual(len(message.to), 2)
    self.assertEqual(
        set(message.to),
        set([build_email_address_for_user(review.user),
             build_email_address_for_user(review_request.submitter)]))
def test_review_reply_email(self):
    """Testing sending an e-mail when replying to a review"""
    review_request = self.create_review_request(
        summary='My test review request')
    review_request.publish(review_request.submitter)

    base_review = self.create_review(review_request=review_request)
    base_review.publish()

    # Clear the outbox so only the reply's e-mail is inspected.
    mail.outbox = []

    reply = self.create_reply(base_review)
    reply.publish()

    from_email = build_email_address_for_user(reply.user)

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
    self.assertEqual(mail.outbox[0].subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.pk)
    self.assertValidRecipients([
        review_request.submitter.username,
        base_review.user.username,
        reply.user.username,
    ])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'], self._get_sender(reply.user))
def test_review_reply_email_with_dmarc_deny(self):
    """Testing sending an e-mail when replying to a review with From
    spoofing blocked by DMARC
    """
    # The rejecting DMARC policy forces the noreply From fallback.
    self.dmarc_txt_records['_dmarc.example.com'] = 'v=DMARC1; p=reject;'

    review_request = self.create_review_request(
        summary='My test review request')
    review_request.publish(review_request.submitter)

    base_review = self.create_review(review_request=review_request)
    base_review.publish()

    # Clear the outbox so only the reply's e-mail is inspected.
    mail.outbox = []

    reply = self.create_reply(base_review)
    reply.publish()

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'],
                     'Grumpy Dwarf via Review Board <noreply@example.com>')
    self.assertEqual(mail.outbox[0].subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.pk)
    self.assertValidRecipients([
        review_request.submitter.username,
        base_review.user.username,
        reply.user.username,
    ])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'], self._get_sender(reply.user))
def test_update_review_request_email(self):
    """Testing sending an e-mail when updating a review request"""
    group = Group.objects.create(name='devgroup',
                                 mailing_list='devgroup@example.com')

    review_request = self.create_review_request(
        summary='My test review request')
    review_request.target_groups.add(group)

    # A pre-existing message ID marks this as an update, so the subject
    # gets the 'Re:' prefix.
    review_request.email_message_id = "junk"
    review_request.publish(review_request.submitter)

    from_email = build_email_address_for_user(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
    self.assertEqual(mail.outbox[0].subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.pk)
    self.assertValidRecipients([review_request.submitter.username],
                               ['devgroup'])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'],
                     self._get_sender(review_request.submitter))
def test_update_review_request_email_with_dmarc_deny(self):
    """Testing sending an e-mail when updating a review request with
    From spoofing blocked by DMARC
    """
    # The rejecting DMARC policy forces the noreply From fallback.
    self.dmarc_txt_records['_dmarc.example.com'] = 'v=DMARC1; p=reject;'

    group = Group.objects.create(name='devgroup',
                                 mailing_list='devgroup@example.com')

    review_request = self.create_review_request(
        summary='My test review request')
    review_request.target_groups.add(group)

    # A pre-existing message ID marks this as an update ('Re:' subject).
    review_request.email_message_id = "junk"
    review_request.publish(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'],
                     'Doc Dwarf via Review Board <noreply@example.com>')
    self.assertEqual(mail.outbox[0].subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.pk)
    self.assertValidRecipients([review_request.submitter.username],
                               ['devgroup'])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'],
                     self._get_sender(review_request.submitter))
def test_add_reviewer_review_request_email(self):
    """Testing limited e-mail recipients
    when adding a reviewer to an existing review request
    """
    review_request = self.create_review_request(
        summary='My test review request',
        public=True)

    # A pre-existing message ID marks this as an update ('Re:' subject).
    review_request.email_message_id = "junk"
    review_request.target_people.add(User.objects.get(username='dopey'))
    review_request.save()

    draft = ReviewRequestDraft.create(review_request)
    draft.target_people.add(User.objects.get(username='grumpy'))
    draft.publish(user=review_request.submitter)

    from_email = build_email_address_for_user(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
    self.assertEqual(mail.outbox[0].subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.pk)

    # The only included users should be the submitter and 'grumpy' (not
    # 'dopey', since he was already included on the review request earlier)
    self.assertValidRecipients([review_request.submitter.username,
                                'grumpy'])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'],
                     self._get_sender(review_request.submitter))
def test_add_group_review_request_email(self):
    """Testing limited e-mail recipients
    when adding a group to an existing review request
    """
    existing_group = Group.objects.create(
        name='existing', mailing_list='existing@example.com')

    review_request = self.create_review_request(
        summary='My test review request',
        public=True)

    # A pre-existing message ID marks this as an update ('Re:' subject).
    review_request.email_message_id = "junk"
    review_request.target_groups.add(existing_group)
    review_request.target_people.add(User.objects.get(username='dopey'))
    review_request.save()

    new_group = Group.objects.create(name='devgroup',
                                     mailing_list='devgroup@example.com')
    draft = ReviewRequestDraft.create(review_request)
    draft.target_groups.add(new_group)
    draft.publish(user=review_request.submitter)

    from_email = build_email_address_for_user(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
    self.assertEqual(mail.outbox[0].subject,
                     'Re: Review Request %s: My test review request'
                     % review_request.pk)

    # The only included users should be the submitter and 'devgroup' (not
    # 'dopey' or 'existing', since they were already included on the
    # review request earlier)
    self.assertValidRecipients([review_request.submitter.username],
                               ['devgroup'])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'],
                     self._get_sender(review_request.submitter))
def test_limited_recipients_other_fields(self):
    """Testing that recipient limiting only happens when adding reviewers
    """
    review_request = self.create_review_request(
        summary='My test review request',
        public=True)

    # A pre-existing message ID marks this as an update ('Re:' subject).
    review_request.email_message_id = "junk"
    review_request.target_people.add(User.objects.get(username='dopey'))
    review_request.save()

    draft = ReviewRequestDraft.create(review_request)

    # Changing the summary along with the reviewers means the update is
    # not reviewer-only, so everyone gets the e-mail.
    draft.summary = 'Changed summary'
    draft.target_people.add(User.objects.get(username='grumpy'))
    draft.publish(user=review_request.submitter)

    from_email = build_email_address_for_user(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
    self.assertEqual(mail.outbox[0].subject,
                     'Re: Review Request %s: Changed summary'
                     % review_request.pk)
    self.assertValidRecipients([review_request.submitter.username,
                                'dopey', 'grumpy'])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'],
                     self._get_sender(review_request.submitter))
def test_recipients_with_muted_review_requests(self):
    """Testing e-mail recipients when users mute a review request"""
    dopey = User.objects.get(username='dopey')
    admin = User.objects.get(username='admin')

    group = Group.objects.create(name='group')
    group.users.add(admin)
    group.save()

    review_request = self.create_review_request(
        summary='My test review request',
        public=True)
    review_request.target_people.add(dopey)
    review_request.target_people.add(User.objects.get(username='grumpy'))
    review_request.target_groups.add(group)
    review_request.save()

    # Mute the review request for 'dopey' (direct reviewer) and 'admin'
    # (group member); both must be excluded from the recipients.
    visit = self.create_visit(review_request, ReviewRequestVisit.MUTED,
                              dopey)
    visit.save()

    visit = self.create_visit(review_request, ReviewRequestVisit.MUTED,
                              admin)
    visit.save()

    draft = ReviewRequestDraft.create(review_request)
    draft.summary = 'Summary changed'
    draft.publish(user=review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)
    self.assertValidRecipients(['doc', 'grumpy'])
def test_group_member_not_receive_email(self):
    """Testing sending review e-mails and filtering out the review
    submitter when they are part of a review group assigned to the request
    """
    # See issue 3985.
    submitter = User.objects.get(username='doc')
    profile = submitter.get_profile()

    # The submitter opted out of e-mail about their own updates, so they
    # must not receive mail even via group membership.
    profile.should_send_own_updates = False
    profile.save(update_fields=('should_send_own_updates',))

    reviewer = User.objects.get(username='dopey')

    group = self.create_review_group()
    group.users.add(submitter)

    review_request = self.create_review_request(public=True)
    review_request.target_groups.add(group)
    review_request.target_people.add(reviewer)
    review_request.save()

    review = self.create_review(review_request, user=submitter)
    review.publish()

    self.assertEqual(len(mail.outbox), 1)

    msg = mail.outbox[0]

    self.assertListEqual(
        msg.to,
        [build_email_address_for_user(reviewer)])
    self.assertListEqual(msg.cc, [])
def test_local_site_user_filters(self):
    """Testing sending e-mails and filtering out users not on a local site
    """
    test_site = LocalSite.objects.create(name=self.local_site_name)

    site_user1 = User.objects.create_user(username='site_user1',
                                          email='site_user1@example.com')
    site_user2 = User.objects.create_user(username='site_user2',
                                          email='site_user2@example.com')
    site_user3 = User.objects.create_user(username='site_user3',
                                          email='site_user3@example.com')
    site_user4 = User.objects.create_user(username='site_user4',
                                          email='site_user4@example.com')
    site_user5 = User.objects.create_user(username='site_user5',
                                          email='site_user5@example.com')
    non_site_user1 = User.objects.create_user(
        username='non_site_user1',
        email='non_site_user1@example.com')
    non_site_user2 = User.objects.create_user(
        username='non_site_user2',
        email='non_site_user2@example.com')
    non_site_user3 = User.objects.create_user(
        username='non_site_user3',
        email='non_site_user3@example.com')

    test_site.admins.add(site_user1)
    test_site.users.add(site_user2)
    test_site.users.add(site_user3)
    test_site.users.add(site_user4)
    test_site.users.add(site_user5)

    group = Group.objects.create(name='my-group',
                                 display_name='My Group',
                                 local_site=test_site)
    group.users.add(site_user5)
    group.users.add(non_site_user3)

    review_request = self.create_review_request(with_local_site=True,
                                                local_id=123)
    review_request.email_message_id = "junk"
    # NOTE(review): direct assignment to many-to-many relations was
    # removed in Django 2.0; this would need .set() on newer Django.
    review_request.target_people = [site_user1, site_user2, site_user3,
                                    non_site_user1]
    review_request.target_groups = [group]

    review = Review.objects.create(review_request=review_request,
                                   user=site_user4)
    review.publish()

    review = Review.objects.create(review_request=review_request,
                                   user=non_site_user2)
    review.publish()

    from_email = build_email_address_for_user(review_request.submitter)

    # Now that we're set up, send another e-mail.
    mail.outbox = []
    review_request.publish(review_request.submitter)

    # Only users who belong to the Local Site (members or admins) may
    # appear in the recipient list.
    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].from_email, self.sender)
    self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
    self.assertValidRecipients(
        ['site_user1', 'site_user2', 'site_user3', 'site_user4',
         'site_user5', review_request.submitter.username], [])

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'],
                     self._get_sender(review_request.submitter))
def test_review_request_email_with_unicode_from(self):
    """Testing sending a review request e-mail with a Unicode From"""
    self.spy_on(logging.exception)

    review_request = self.create_review_request()

    # Give the owner a non-ASCII full name to force RFC 2047 encoding
    # of the address headers.
    owner = review_request.owner
    owner.first_name = 'Tést'
    owner.last_name = 'Üser'
    owner.save(update_fields=('first_name', 'last_name'))

    review_request.publish(review_request.submitter)

    self.assertIsNotNone(review_request.email_message_id)
    self.assertFalse(logging.exception.spy.called)
    self.assertEqual(len(mail.outbox), 1)

    message = mail.outbox[0].message()
    self.assertEqual(message['Sender'],
                     '=?utf-8?b?VMOpc3Qgw5xzZXI=?= <noreply@example.com>')
    self.assertEqual(message['From'],
                     '=?utf-8?b?VMOpc3Qgw5xzZXI=?= <doc@example.com>')
    self.assertEqual(
        message['X-Sender'],
        '=?utf-8?b?VMOpc3Qgw5xzZXIgPG5vcmVwbHlAZXhhbXBsZS5jb20+?=')

    # Make sure this doesn't crash.
    message.as_bytes()
def test_review_request_email_with_unicode_summary(self):
    """Testing sending a review request e-mail with a Unicode subject"""
    self.spy_on(logging.exception)

    review_request = self.create_review_request()

    # An emoji in the summary forces RFC 2047 encoding of the Subject.
    review_request.summary = '\U0001f600'
    review_request.publish(review_request.submitter)

    self.assertIsNotNone(review_request.email_message_id)
    self.assertFalse(logging.exception.spy.called)
    self.assertEqual(len(mail.outbox), 1)

    message = mail.outbox[0].message()
    self.assertEqual(message['Subject'],
                     '=?utf-8?q?Review_Request_1=3A_=F0=9F=98=80?=')

    # Make sure this doesn't crash.
    message.as_bytes()
def test_review_request_email_with_unicode_description(self):
    """Testing sending a review request e-mail with a Unicode
    description
    """
    self.spy_on(logging.exception)

    review_request = self.create_review_request()

    # Non-ASCII summary, description, and owner name together exercise
    # the full encoding path (headers and body).
    review_request.summary = '\U0001f600'
    review_request.description = '\U0001f600'

    owner = review_request.owner
    owner.first_name = 'Tést'
    owner.last_name = 'Üser'
    owner.save(update_fields=('first_name', 'last_name'))

    review_request.publish(review_request.submitter)

    self.assertIsNotNone(review_request.email_message_id)
    self.assertFalse(logging.exception.spy.called)
    self.assertEqual(len(mail.outbox), 1)

    message = mail.outbox[0].message()
    self.assertIn('\U0001f600'.encode('utf-8'), message.as_bytes())

    # Make sure this doesn't crash.
    message.as_bytes()
@add_fixtures(['test_scmtools'])
def test_review_request_email_with_added_file(self):
    """Testing sending a review request e-mail with added files in the
    diffset
    """
    repository = self.create_repository(tool_name='Test')
    review_request = self.create_review_request(repository=repository)
    diffset = self.create_diffset(review_request=review_request)

    # PRE_CREATION source means the file is newly added; only the
    # destination name should appear in the headers.
    filediff = self.create_filediff(diffset=diffset,
                                    source_file='/dev/null',
                                    source_revision=PRE_CREATION)

    review_request.publish(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)

    message = mail.outbox[0]

    self.assertTrue('X-ReviewBoard-Diff-For' in message._headers)
    diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')

    self.assertEqual(len(diff_headers), 1)
    self.assertFalse(filediff.source_file in diff_headers)
    self.assertTrue(filediff.dest_file in diff_headers)
@add_fixtures(['test_scmtools'])
def test_review_request_email_with_added_files_over_header_limit(self):
    """Testing sending a review request e-mail with added files in the
    diffset such that the filename headers take up more than 8192
    characters
    """
    self.spy_on(logging.warning)
    self.maxDiff = None

    repository = self.create_repository(tool_name='Test')
    review_request = self.create_review_request(repository=repository)
    diffset = self.create_diffset(review_request=review_request)

    prefix = 'X' * 97

    filediffs = []

    # Each filename is 100 characters long. For each header we add 26
    # characters: the key, a ': ', and the terminating '\r\n'.
    # 8192 / (100 + 26) rounds down to 65. We'll bump it up to 70 just
    # to be careful.
    for i in range(70):
        filename = '%s%#03d' % (prefix, i)
        self.assertEqual(len(filename), 100)

        filediffs.append(self.create_filediff(
            diffset=diffset,
            source_file=filename,
            dest_file=filename,
            source_revision=PRE_CREATION,
            diff=b'',
            save=False))

    FileDiff.objects.bulk_create(filediffs)

    review_request.publish(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)

    message = mail.outbox[0]

    self.assertIn('X-ReviewBoard-Diff-For', message._headers)
    diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')

    # Only the first 65 filenames fit under the 8192-character budget;
    # the overflow is logged once and the rest are dropped.
    self.assertEqual(len(logging.warning.spy.calls), 1)
    self.assertEqual(len(diff_headers), 65)

    self.assertEqual(
        logging.warning.spy.calls[0].args,
        ('Unable to store all filenames in the X-ReviewBoard-Diff-For '
         'headers when sending e-mail for review request %s: The header '
         'size exceeds the limit of %s. Remaining headers have been '
         'omitted.',
         1,
         8192))
@add_fixtures(['test_scmtools'])
def test_review_request_email_with_deleted_file(self):
    """Testing sending a review request e-mail with deleted files in the
    diffset
    """
    repository = self.create_repository(tool_name='Test')
    review_request = self.create_review_request(repository=repository)
    diffset = self.create_diffset(review_request=review_request)

    # A deleted file should contribute only its source name to the
    # headers, never the '/dev/null' destination.
    filediff = self.create_filediff(diffset=diffset,
                                    dest_file='/dev/null',
                                    status=FileDiff.DELETED)

    review_request.publish(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)

    message = mail.outbox[0]

    self.assertTrue('X-ReviewBoard-Diff-For' in message._headers)
    diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')

    self.assertEqual(len(diff_headers), 1)
    self.assertTrue(filediff.source_file in diff_headers)
    self.assertFalse(filediff.dest_file in diff_headers)
@add_fixtures(['test_scmtools'])
def test_review_request_email_with_moved_file(self):
    """Testing sending a review request e-mail with moved files in the
    diffset
    """
    repository = self.create_repository(tool_name='Test')
    review_request = self.create_review_request(repository=repository)
    diffset = self.create_diffset(review_request=review_request)

    # A moved file should contribute both its old and new names.
    filediff = self.create_filediff(diffset=diffset,
                                    source_file='foo',
                                    dest_file='bar',
                                    status=FileDiff.MOVED)

    review_request.publish(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)

    message = mail.outbox[0]

    self.assertTrue('X-ReviewBoard-Diff-For' in message._headers)
    diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')

    self.assertEqual(len(diff_headers), 2)
    self.assertTrue(filediff.source_file in diff_headers)
    self.assertTrue(filediff.dest_file in diff_headers)
@add_fixtures(['test_scmtools'])
def test_review_request_email_with_copied_file(self):
    """Testing sending a review request e-mail with copied files in the
    diffset
    """
    repository = self.create_repository(tool_name='Test')
    review_request = self.create_review_request(repository=repository)
    diffset = self.create_diffset(review_request=review_request)

    # A copied file should contribute both its source and copy names.
    filediff = self.create_filediff(diffset=diffset,
                                    source_file='foo',
                                    dest_file='bar',
                                    status=FileDiff.COPIED)

    review_request.publish(review_request.submitter)

    self.assertEqual(len(mail.outbox), 1)

    message = mail.outbox[0]

    self.assertTrue('X-ReviewBoard-Diff-For' in message._headers)
    diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')

    self.assertEqual(len(diff_headers), 2)
    self.assertTrue(filediff.source_file in diff_headers)
    self.assertTrue(filediff.dest_file in diff_headers)
@add_fixtures(['test_scmtools'])
def test_review_request_email_with_modified_file(self):
"""Testing sending a review request e-mail with modified files in
the diffset
"""
# Bug #4572 reported that the 'X-ReviewBoard-Diff-For' header appeared
# only for newly created files and moved files. This test is to check
# that the header appears for modified files as well.
repository = self.create_repository(tool_name='Test')
review_request = self.create_review_request(repository=repository)
diffset = self.create_diffset(review_request=review_request)
filediff = self.create_filediff(diffset=diffset,
source_file='foo',
dest_file='bar',
status=FileDiff.MODIFIED)
review_request.publish(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-Diff-For', message._headers)
diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')
self.assertEqual(len(diff_headers), 2)
self.assertIn(filediff.source_file, diff_headers)
self.assertIn(filediff.dest_file, diff_headers)
@add_fixtures(['test_scmtools'])
def test_review_request_email_with_multiple_files(self):
"""Testing sending a review request e-mail with multiple files in the
diffset
"""
repository = self.create_repository(tool_name='Test')
review_request = self.create_review_request(repository=repository)
diffset = self.create_diffset(review_request=review_request)
filediffs = [
self.create_filediff(diffset=diffset,
source_file='foo',
dest_file='bar',
status=FileDiff.MOVED),
self.create_filediff(diffset=diffset,
source_file='baz',
dest_file='/dev/null',
status=FileDiff.DELETED)
]
review_request.publish(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertTrue('X-ReviewBoard-Diff-For' in message._headers)
diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')
self.assertEqual(
set(diff_headers),
{
filediffs[0].source_file,
filediffs[0].dest_file,
filediffs[1].source_file,
})
def test_extra_headers_dict(self):
"""Testing sending extra headers as a dict with an e-mail message"""
review_request = self.create_review_request()
submitter = review_request.submitter
send_email(prepare_base_review_request_mail,
user=submitter,
review_request=review_request,
subject='Foo',
in_reply_to=None,
to_field=[submitter],
cc_field=[],
template_name_base='notifications/review_request_email',
extra_headers={'X-Foo': 'Bar'})
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-Foo', message._headers)
self.assertEqual(message._headers['X-Foo'], 'Bar')
def test_extra_headers_multivalue_dict(self):
"""Testing sending extra headers as a MultiValueDict with an e-mail
message
"""
header_values = ['Bar', 'Baz']
review_request = self.create_review_request()
submitter = review_request.submitter
send_email(prepare_base_review_request_mail,
user=review_request.submitter,
review_request=review_request,
subject='Foo',
in_reply_to=None,
to_field=[submitter],
cc_field=[],
template_name_base='notifications/review_request_email',
extra_headers=MultiValueDict({'X-Foo': header_values}))
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-Foo', message._headers)
self.assertEqual(set(message._headers.getlist('X-Foo')),
set(header_values))
def test_review_no_shipit_headers(self):
"""Testing sending a review e-mail without a 'Ship It!'"""
review_request = self.create_review_request(public=True)
self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
publish=True)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertNotIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
message.message().as_string())
def test_review_shipit_only_headers(self):
"""Testing sending a review e-mail with only a 'Ship It!'"""
review_request = self.create_review_request(public=True)
self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
ship_it=True,
publish=True)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertIn('X-ReviewBoard-ShipIt-Only', message._headers)
self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
message.message().as_string())
def test_review_shipit_only_headers_no_text(self):
"""Testing sending a review e-mail with only a 'Ship It!' and no text
"""
review_request = self.create_review_request(public=True)
self.create_review(review_request,
body_top='',
body_bottom='',
ship_it=True,
publish=True)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertIn('X-ReviewBoard-ShipIt-Only', message._headers)
self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
message.message().as_string())
def test_review_shipit_headers_custom_top_text(self):
"""Testing sending a review e-mail with a 'Ship It' and custom top text
"""
review_request = self.create_review_request(public=True)
self.create_review(review_request,
body_top='Some general information.',
body_bottom='',
ship_it=True,
publish=True)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
message.message().as_string())
def test_review_shipit_headers_bottom_text(self):
"""Testing sending a review e-mail with a 'Ship It' and bottom text"""
review_request = self.create_review_request(public=True)
self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='Some comments',
ship_it=True,
publish=True)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
message.message().as_string())
@add_fixtures(['test_scmtools'])
def test_review_shipit_headers_comments(self):
"""Testing sending a review e-mail with a 'Ship It' and diff comments
"""
repository = self.create_repository(tool_name='Test')
review_request = self.create_review_request(repository=repository,
public=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
ship_it=True,
publish=False)
self.create_diff_comment(review, filediff)
review.publish()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
message.message().as_string())
@add_fixtures(['test_scmtools'])
def test_review_shipit_headers_comments_opened_issue(self):
"""Testing sending a review e-mail with a 'Ship It' and diff comments
with opened issue
"""
repository = self.create_repository(tool_name='Test')
review_request = self.create_review_request(repository=repository,
public=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
ship_it=True,
publish=False)
self.create_diff_comment(review, filediff, issue_opened=True)
review.publish()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
self.assertTrue(Review.FIX_IT_THEN_SHIP_IT_TEXT in
message.message().as_string())
def test_review_shipit_headers_attachment_comments(self):
"""Testing sending a review e-mail with a 'Ship It' and file attachment
comments
"""
review_request = self.create_review_request(public=True)
file_attachment = self.create_file_attachment(review_request)
review = self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
ship_it=True,
publish=False)
self.create_file_attachment_comment(review, file_attachment)
review.publish()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
message.message().as_string())
def test_review_shipit_headers_attachment_comments_opened_issue(self):
"""Testing sending a review e-mail with a 'Ship It' and file attachment
comments with opened issue
"""
review_request = self.create_review_request(public=True)
file_attachment = self.create_file_attachment(review_request)
review = self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
ship_it=True,
publish=False)
self.create_file_attachment_comment(review, file_attachment,
issue_opened=True)
review.publish()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
self.assertTrue(Review.FIX_IT_THEN_SHIP_IT_TEXT in
message.message().as_string())
def test_review_shipit_headers_screenshot_comments(self):
"""Testing sending a review e-mail with a 'Ship It' and screenshot
comments
"""
review_request = self.create_review_request(public=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
ship_it=True,
publish=False)
self.create_screenshot_comment(review, screenshot)
review.publish()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
message.message().as_string())
def test_review_shipit_headers_screenshot_comments_opened_issue(self):
"""Testing sending a review e-mail with a 'Ship It' and screenshot
comments with opened issue
"""
review_request = self.create_review_request(public=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
ship_it=True,
publish=False)
self.create_screenshot_comment(review, screenshot, issue_opened=True)
review.publish()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
self.assertTrue(Review.FIX_IT_THEN_SHIP_IT_TEXT in
message.message().as_string())
def test_review_shipit_headers_general_comments(self):
"""Testing sending a review e-mail with a 'Ship It' and general
comments
"""
review_request = self.create_review_request(public=True)
review = self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
ship_it=True,
publish=False)
self.create_general_comment(review)
review.publish()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
self.assertFalse(Review.FIX_IT_THEN_SHIP_IT_TEXT in
message.message().as_string())
def test_review_shipit_headers_general_comments_opened_issue(self):
"""Testing sending a review e-mail with a 'Ship It' and general
comments with opened issue
"""
review_request = self.create_review_request(public=True)
review = self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
ship_it=True,
publish=False)
self.create_general_comment(review, issue_opened=True)
review.publish()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
self.assertTrue(Review.FIX_IT_THEN_SHIP_IT_TEXT in
message.message().as_string())
def test_change_ownership_email(self):
"""Testing sending a review request e-mail when the owner is being
changed
"""
admin_user = User.objects.get(username='admin')
admin_email = build_email_address_for_user(admin_user)
review_request = self.create_review_request(public=True)
submitter = review_request.submitter
submitter_email = build_email_address_for_user(submitter)
draft = ReviewRequestDraft.create(review_request)
draft.target_people = [submitter, admin_user]
draft.owner = admin_user
draft.save()
review_request.publish(submitter)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertEqual(message.extra_headers['From'], submitter_email)
self.assertSetEqual(set(message.to),
{admin_email, submitter_email})
def test_change_ownership_email_not_submitter(self):
"""Testing sending a review request e-mail when the owner is being
changed by someone else
"""
admin_user = User.objects.get(username='admin')
admin_email = build_email_address_for_user(admin_user)
review_request = self.create_review_request(public=True)
submitter = review_request.submitter
submitter_email = build_email_address_for_user(submitter)
draft = ReviewRequestDraft.create(review_request)
# Before publishing, target_people must be added.
draft.target_people = [admin_user, submitter]
draft.owner = admin_user
draft.save()
review_request.publish(admin_user)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertEqual(message.extra_headers['From'], admin_email)
self.assertSetEqual(set(message.to),
{admin_email, submitter_email})
def _get_sender(self, user):
return build_email_address(full_name=user.get_full_name(),
email=self.sender)
class ReviewRequestSiteRootURLTests(SiteRootURLTestsMixin,
                                    ReviewRequestEmailTestsMixin, TestCase):
    """Tests for Bug 4612 related to review request and review e-mails."""

    @override_settings(**SiteRootURLTestsMixin.CUSTOM_SITE_ROOT_SETTINGS)
    def test_review_request_email_site_root_custom(self):
        """Testing review request e-mail includes site root only once with
        custom site root
        """
        review_request = self.create_review_request()
        review_request.publish(review_request.submitter)

        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]

        absolute_url = review_request.get_absolute_url()
        review_request_url = build_server_url(absolute_url)

        # A doubled site root would occur if the absolute URL (which already
        # contains SITE_ROOT) were appended to a server URL that also ends
        # with SITE_ROOT.
        bad_review_request_url = '%s%s' % (get_server_url(), absolute_url)

        self.assertNotIn(self.BAD_SITE_ROOT, review_request_url)
        self.assertIn(self.BAD_SITE_ROOT, bad_review_request_url)

        self.assertIn(review_request_url, message.body)
        self.assertNotIn(bad_review_request_url, message.body)

        for alternative in message.alternatives:
            # Each alternative is a (content, mimetype) tuple, so the
            # substring check must run against alternative[0]. The previous
            # version checked the tuple itself, which is tuple-membership
            # (element equality) and could never fail.
            self.assertNotIn(bad_review_request_url, alternative[0])

    def test_review_request_email_site_root_default(self):
        """Testing review request e-mail includes site root only once with
        default site root
        """
        review_request = self.create_review_request()
        review_request.publish(review_request.submitter)

        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]

        # 'example.com//' would indicate the root was duplicated.
        self.assertNotIn('example.com//', message.body)

        for alternative in message.alternatives:
            self.assertNotIn('example.com//', alternative[0])

    @add_fixtures(['test_site'])
    @override_settings(**SiteRootURLTestsMixin.CUSTOM_SITE_ROOT_SETTINGS)
    def test_review_request_email_site_root_custom_with_localsite(self):
        """Testing review request e-mail includes site root only once with
        custom site root and a LocalSite
        """
        review_request = self.create_review_request(with_local_site=True)

        with self.settings(SITE_ROOT='/foo/'):
            review_request.publish(review_request.submitter)
            absolute_url = review_request.get_absolute_url()
            bad_review_request_url = '%s%s' % (get_server_url(),
                                               absolute_url)

        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]

        review_request_url = build_server_url(absolute_url)

        self.assertNotIn(self.BAD_SITE_ROOT, review_request_url)
        self.assertIn(self.BAD_SITE_ROOT, bad_review_request_url)

        self.assertIn(review_request_url, message.body)
        self.assertNotIn(bad_review_request_url, message.body)

        for alternative in message.alternatives:
            self.assertNotIn(bad_review_request_url, alternative[0])

    @add_fixtures(['test_site'])
    def test_review_request_email_site_root_default_with_localsite(self):
        """Testing review request e-mail includes site root only once with
        default site root and a LocalSite
        """
        # NOTE(review): despite the name, this does not pass
        # with_local_site=True -- confirm whether the LocalSite variant was
        # intended here.
        review_request = self.create_review_request()
        review_request.publish(review_request.submitter)

        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]

        self.assertNotIn('example.com//', message.body)

        for alternative in message.alternatives:
            self.assertNotIn('example.com//', alternative[0])

    @override_settings(**SiteRootURLTestsMixin.CUSTOM_SITE_ROOT_SETTINGS)
    def test_review_email_site_root_custom(self):
        """Testing review e-mail includes site root only once with custom site
        root
        """
        review_request = self.create_review_request(public=True)
        review = self.create_review(review_request=review_request)
        review.publish(review.user)

        review_url = build_server_url(review.get_absolute_url())
        bad_review_url = '%s%s' % (get_server_url(),
                                   review.get_absolute_url())

        self.assertNotIn(self.BAD_SITE_ROOT, review_url)
        self.assertIn(self.BAD_SITE_ROOT, bad_review_url)

        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]

        self.assertNotIn(bad_review_url, message.body)

        for alternative in message.alternatives:
            self.assertNotIn(bad_review_url, alternative[0])

    def test_review_email_site_root_default(self):
        """Testing review e-mail includes site root only once with default site
        root
        """
        review_request = self.create_review_request(public=True)
        review = self.create_review(review_request=review_request)
        review.publish(review.user)

        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]

        self.assertNotIn('example.com//', message.body)

        for alternative in message.alternatives:
            self.assertNotIn('example.com//', alternative[0])

    @add_fixtures(['test_site'])
    @override_settings(**SiteRootURLTestsMixin.CUSTOM_SITE_ROOT_SETTINGS)
    def test_review_email_site_root_custom_with_localsite(self):
        """Testing review e-mail includes site root only once with custom site
        root and a LocalSite
        """
        review_request = self.create_review_request(public=True,
                                                    with_local_site=True)
        review = self.create_review(review_request=review_request)
        review.publish(review.user)

        review_url = build_server_url(review.get_absolute_url())
        bad_review_url = '%s%s' % (get_server_url(),
                                   review.get_absolute_url())

        self.assertNotIn(self.BAD_SITE_ROOT, review_url)
        self.assertIn(self.BAD_SITE_ROOT, bad_review_url)

        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]

        self.assertNotIn(bad_review_url, message.body)

        for alternative in message.alternatives:
            self.assertNotIn(bad_review_url, alternative[0])

    @add_fixtures(['test_site'])
    def test_review_email_site_root_default_with_localsite(self):
        """Testing review e-mail includes site root only once with default site
        root and a LocalSite
        """
        review_request = self.create_review_request(public=True,
                                                    with_local_site=True)
        review = self.create_review(review_request=review_request)
        review.publish(review.user)

        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]

        self.assertNotIn('example.com//', message.body)

        for alternative in message.alternatives:
            self.assertNotIn('example.com//', alternative[0])
class WebAPITokenEmailTestsMixin(EmailTestHelper):
    """A mixin for web hook-related e-mail tests."""

    email_siteconfig_settings = {
        'mail_send_new_user_mail': False,
    }

    def setUp(self):
        super(WebAPITokenEmailTestsMixin, self).setUp()

        self.user = User.objects.create_user(
            username='test-user',
            first_name='Sample',
            last_name='User',
            email='test-user@example.com')

        # With mail_send_new_user_mail disabled, creating the account must
        # not have queued any e-mail.
        self.assertEqual(len(mail.outbox), 0)
class WebAPITokenEmailTests(WebAPITokenEmailTestsMixin, TestCase):
    """Unit tests for WebAPIToken creation e-mails."""

    def _check_common_fields(self, email):
        """Assert the sender/recipient fields shared by all token e-mails."""
        self.assertEqual(email.from_email, self.sender)
        self.assertEqual(email.extra_headers['From'],
                         settings.DEFAULT_FROM_EMAIL)
        self.assertEqual(email.to[0],
                         build_email_address_for_user(self.user))

    def test_create_token(self):
        """Testing sending e-mail when a new API Token is created"""
        webapi_token = WebAPIToken.objects.generate_token(user=self.user,
                                                          note='Test',
                                                          policy={})

        self.assertEqual(len(mail.outbox), 1)

        email = mail.outbox[0]
        html_body = email.alternatives[0][0]
        partial_token = '%s...' % webapi_token.token[:10]

        self.assertEqual(email.subject, 'New Review Board API token created')
        self._check_common_fields(email)

        # The full token value must never be exposed -- only a prefix.
        for body in (email.body, html_body):
            self.assertNotIn(webapi_token.token, body)
            self.assertIn(partial_token, body)
            self.assertIn('A new API token has been added to your Review '
                          'Board account',
                          body)

    def test_create_token_no_email(self):
        """Testing WebAPIToken.objects.generate_token does not send e-mail
        when auto_generated is True
        """
        WebAPIToken.objects.generate_token(user=self.user,
                                           note='Test',
                                           policy={},
                                           auto_generated=True)

        self.assertEqual(len(mail.outbox), 0)

    def test_update_token(self):
        """Testing sending e-mail when an existing API Token is updated"""
        webapi_token = WebAPIToken.objects.generate_token(user=self.user,
                                                          note='Test',
                                                          policy={})

        # Drop the creation e-mail so only the update is observed.
        mail.outbox = []
        webapi_token.save()

        self.assertEqual(len(mail.outbox), 1)

        email = mail.outbox[0]
        html_body = email.alternatives[0][0]
        partial_token = '%s...' % webapi_token.token[:10]

        self.assertEqual(email.subject, 'Review Board API token updated')
        self._check_common_fields(email)

        for body in (email.body, html_body):
            self.assertNotIn(webapi_token.token, body)
            self.assertIn(partial_token, body)
            self.assertIn('One of your API tokens has been updated on your '
                          'Review Board account',
                          body)

    def test_delete_token(self):
        """Testing sending e-mail when an existing API Token is deleted"""
        webapi_token = WebAPIToken.objects.generate_token(user=self.user,
                                                          note='Test',
                                                          policy={})

        # Drop the creation e-mail so only the deletion is observed.
        mail.outbox = []
        webapi_token.delete()

        self.assertEqual(len(mail.outbox), 1)

        email = mail.outbox[0]
        html_body = email.alternatives[0][0]

        self.assertEqual(email.subject, 'Review Board API token deleted')
        self._check_common_fields(email)

        # The deletion e-mail includes the full token so users can identify
        # which one was removed.
        for body in (email.body, html_body):
            self.assertIn(webapi_token.token, body)
            self.assertIn('One of your API tokens has been deleted from your '
                          'Review Board account',
                          body)
class WebAPITokenSiteRootURLTests(SiteRootURLTestsMixin,
                                  WebAPITokenEmailTestsMixin, TestCase):
    """Tests for Bug 4612 related to web API token e-mails."""

    def _assert_fragment_absent(self, fragment):
        """Assert exactly one message was sent and no body contains fragment."""
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]

        self.assertNotIn(fragment, message.body)

        for alternative in message.alternatives:
            self.assertNotIn(fragment, alternative[0])

    @override_settings(**SiteRootURLTestsMixin.CUSTOM_SITE_ROOT_SETTINGS)
    def test_create_token_site_root_custom(self):
        """Testing WebAPI Token e-mails include site root only once with custom
        site root
        """
        WebAPIToken.objects.generate_token(user=self.user, note='Test',
                                           policy={})

        self._assert_fragment_absent(self.BAD_SITE_ROOT)

    def test_create_token_site_root_default(self):
        """Testing WebAPI Token e-mails include site root only once with
        default site root
        """
        WebAPIToken.objects.generate_token(user=self.user, note='Test',
                                           policy={})

        self._assert_fragment_absent('example.com//')

    @add_fixtures(['test_site'])
    @override_settings(**SiteRootURLTestsMixin.CUSTOM_SITE_ROOT_SETTINGS)
    def test_create_token_site_root_custom_with_localsite(self):
        """Testing WebAPI Token e-mails include site root only once with custom
        site root and a LocalSite
        """
        local_site = LocalSite.objects.get(pk=1)
        WebAPIToken.objects.generate_token(user=self.user, note='Test',
                                           policy={}, local_site=local_site)

        self._assert_fragment_absent(self.BAD_SITE_ROOT)

    @add_fixtures(['test_site'])
    def test_create_token_site_root_default_with_localsite(self):
        """Testing WebAPI Token e-mails include site root only once with
        default site root and a LocalSite
        """
        local_site = LocalSite.objects.get(pk=1)
        WebAPIToken.objects.generate_token(user=self.user, note='Test',
                                           policy={}, local_site=local_site)

        self._assert_fragment_absent('example.com//')
| chipx86/reviewboard | reviewboard/notifications/tests/test_email_sending.py | Python | mit | 87,250 | [
"VisIt"
] | 71d8f74e3a11e9167881c6919dafbf0251d772bba27f971cb1b4ca603607e5b3 |
"""
Gaussian HMM of stock data
--------------------------
This script shows how to use Gaussian HMM on stock price data from
Yahoo! finance. For more information on how to visualize stock prices
with matplotlib, please refer to ``date_demo1.py`` of matplotlib.
"""
from __future__ import print_function
import datetime
import numpy as np
from matplotlib import cm, pyplot as plt
from matplotlib.dates import YearLocator, MonthLocator
try:
from matplotlib.finance import quotes_historical_yahoo_ochl
except ImportError:
# For Matplotlib prior to 1.5.
from matplotlib.finance import (
quotes_historical_yahoo as quotes_historical_yahoo_ochl
)
from hmmlearn.hmm import GaussianHMM
print(__doc__)
###############################################################################
# Get quotes from Yahoo! finance
quotes = quotes_historical_yahoo_ochl(
"INTC", datetime.date(1995, 1, 1), datetime.date(2012, 1, 6))
# Unpack quotes
dates = np.array([q[0] for q in quotes], dtype=int)
close_v = np.array([q[2] for q in quotes])
volume = np.array([q[5] for q in quotes])[1:]
# Take diff of close value. Note that this makes
# ``len(diff) = len(close_t) - 1``, therefore, other quantities also
# need to be shifted by 1.
diff = np.diff(close_v)
dates = dates[1:]
close_v = close_v[1:]
# Pack diff and volume for training.
X = np.column_stack([diff, volume])
###############################################################################
# Run Gaussian HMM
print("fitting to HMM and decoding ...", end="")
# Make an HMM instance and execute fit
model = GaussianHMM(n_components=4, covariance_type="diag", n_iter=1000).fit(X)
# Predict the optimal sequence of internal hidden state
hidden_states = model.predict(X)
print("done")
###############################################################################
# Print trained parameters and plot
print("Transition matrix")
print(model.transmat_)
print()
print("Means and vars of each hidden state")
for i in range(model.n_components):
print("{0}th hidden state".format(i))
print("mean = ", model.means_[i])
print("var = ", np.diag(model.covars_[i]))
print()
fig, axs = plt.subplots(model.n_components, sharex=True, sharey=True)
colours = cm.rainbow(np.linspace(0, 1, model.n_components))
for i, (ax, colour) in enumerate(zip(axs, colours)):
# Use fancy indexing to plot data in each state.
mask = hidden_states == i
ax.plot_date(dates[mask], close_v[mask], ".-", c=colour)
ax.set_title("{0}th hidden state".format(i))
# Format the ticks.
ax.xaxis.set_major_locator(YearLocator())
ax.xaxis.set_minor_locator(MonthLocator())
ax.grid(True)
plt.show() | siddharthhparikh/INFM750-project | HMM_test.py | Python | apache-2.0 | 2,680 | [
"Gaussian"
] | c79435823a173f2b6687103442106c9bfac9738655a8898688a1b49d12032141 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.