| text (string, 12–1.05M chars) | repo_name (string, 5–86) | path (string, 4–191) | language (1 class) | license (15 values) | size (int32, 12–1.05M) | keyword (list, 1–23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
################################################################################
# Name: PyZenity.py
# Author: Brian Ramos
# Created: 10/17/2005
# Revision Information:
# $Date: $
# $Revision: $
# $Author: bramos $
#
# Licence: MIT Licence
#
# Copyright (c) 2010 Brian Ramos
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
################################################################################
from datetime import date
from subprocess import Popen, PIPE
from itertools import chain
from os import path
__all__ = ['GetDate', 'GetFilename', 'GetDirectory', 'GetSavename', 'GetText',
'InfoMessage', 'Question', 'Warning', 'ErrorMessage',
           'Notification', 'TextInfo', 'Progress', 'List']
__doc__ = """PyZenity is an easy to use interface to Zenity for Python.
Zenity is normally called from scripts by invoking it with a multitude of
command line parameters that it uses to construct its interfaces. This
module hides the details of invoking the command and presents simple API
functions like:
cancel = Question('Should I cancel the operation?')
Each function takes optional kwargs parameters. This is to allow the use of
general Zenity parameters such as:
title - Set the dialog title
window_icon - Set the window icon
ok_label - Set the text for the Ok label
cancel_label - Set the text for the Cancel label
height - Set the height
width - Set the width
timeout - Set the dialog timeout in seconds"""
zen_exec = 'zenity'
def run_zenity(dialog_type, *args):
    # 'dialog_type' avoids shadowing the builtin 'type'
    return Popen([zen_exec, dialog_type] + list(args), stdin=PIPE, stdout=PIPE)
# This is a dictionary of optional parameters that would create
# syntax errors in python if they were passed in as kwargs.
kw_subst = {
'window_icon': 'window-icon',
'ok_label': 'ok-label',
'cancel_label': 'cancel-label'
}
def kwargs_helper(kwargs):
"""This function preprocesses the kwargs dictionary to sanitize it."""
args = []
for param, value in kwargs.items():
param = kw_subst.get(param, param)
args.append((param, value))
return args
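# e.g. (a sketch): kwargs_helper({'ok_label': 'Go', 'title': 'Demo'}) yields
#      [('ok-label', 'Go'), ('title', 'Demo')] (pair order may vary by dict).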
def GetDate(text=None, selected=None, **kwargs):
"""Prompt the user for a date.
This will raise a Zenity Calendar Dialog for the user to pick a date.
It will return a datetime.date object with the date or None if the
user hit cancel.
text - Text to be displayed in the calendar dialog.
selected - A datetime.date object that will be the pre-selected date.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--date-format=%d/%m/%Y']
if text:
args.append('--text=%s' % text)
if selected:
args.append('--day=%d' % selected.day)
args.append('--month=%d' % selected.month)
args.append('--year=%d' % selected.year)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--calendar', *args)
if p.wait() == 0:
retval = p.stdout.read().strip()
day, month, year = [int(x) for x in retval.split('/')]
return date(year, month, day)
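# e.g. (a sketch): chosen = GetDate('Pick a start date', selected=date.today())
# returns a datetime.date, or None if the user cancelled.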
def GetFilename(multiple=False, sep='|', **kwargs):
"""Prompt the user for a filename.
This will raise a Zenity File Selection Dialog. It will return a list with
the selected files or None if the user hit cancel.
multiple - True to allow the user to select multiple files.
sep - Token to use as the path separator when parsing Zenity's return
string.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
if multiple:
args.append('--multiple')
if sep != '|':
args.append('--separator=%s' % sep)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--file-selection', *args)
if p.wait() == 0:
        return p.stdout.read()[:-1].split(sep)
def GetDirectory(multiple=False, selected=None, sep=None, **kwargs):
"""Prompt the user for a directory.
This will raise a Zenity Directory Selection Dialog. It will return a
list with the selected directories or None if the user hit cancel.
multiple - True to allow the user to select multiple directories.
selected - Path to the directory to be selected on startup.
sep - Token to use as the path separator when parsing Zenity's return
string.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--directory']
if multiple:
args.append('--multiple')
if selected:
if not path.lexists(selected):
raise ValueError("File %s does not exist!" % selected)
args.append('--filename=%s' % selected)
if sep:
args.append('--separator=%s' % sep)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--file-selection', *args)
if p.wait() == 0:
        return p.stdout.read().strip().split(sep or '|')
def GetSavename(default=None, **kwargs):
"""Prompt the user for a filename to save as.
This will raise a Zenity Save As Dialog. It will return the name to save
a file as or None if the user hit cancel.
default - The default name that should appear in the save as dialog.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--save']
if default:
args.append('--filename=%s' % default)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--file-selection', *args)
if p.wait() == 0:
        return p.stdout.read().strip()
def Notification(text=None, window_icon=None, **kwargs):
"""Put an icon in the notification area.
This will put an icon in the notification area and return when the user
clicks on it.
text - The tooltip that will show when the user hovers over it.
window_icon - The stock icon ("question", "info", "warning", "error") or
path to the icon to show.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
if text:
args.append('--text=%s' % text)
if window_icon:
args.append('--window-icon=%s' % window_icon)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--notification', *args)
p.wait()
def List(column_names, title=None, boolstyle=None, editable=False,
select_col=None, sep='|', data=[], **kwargs):
"""Present a list of items to select.
    This will raise a Zenity List Dialog populated with the columns and rows
specified and return either the cell or row that was selected or None if
the user hit cancel.
column_names - A tuple or list containing the names of the columns.
title - The title of the dialog box.
    boolstyle - Whether the first column should be a boolean option
        ("checklist", "radiolist") or None if it should be a text field.
editable - True if the user can edit the cells.
select_col - The column number of the selected cell to return or "ALL" to
return the entire row.
sep - Token to use as the row separator when parsing Zenity's return.
Cells should not contain this token.
data - A list or tuple of tuples that contain the cells in the row. The
size of the row's tuple must be equal to the number of columns.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
for column in column_names:
args.append('--column=%s' % column)
if title:
args.append('--title=%s' % title)
if boolstyle:
if not (boolstyle == 'checklist' or boolstyle == 'radiolist'):
raise ValueError('"%s" is not a proper boolean column style.'
% boolstyle)
args.append('--' + boolstyle)
if editable:
args.append('--editable')
if select_col:
args.append('--print-column=%s' % select_col)
if sep != '|':
args.append('--separator=%s' % sep)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
for datum in chain(*data):
args.append(str(datum))
p = run_zenity('--list', *args)
if p.wait() == 0:
return p.stdout.read().strip().split(sep)
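# A minimal usage sketch for List:
#   choice = List(('Id', 'Name'), data=[(1, 'foo'), (2, 'bar')])
# returns the first column of the selected row (e.g. ['1']), or None on cancel.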
def ErrorMessage(text, **kwargs):
"""Show an error message dialog to the user.
This will raise a Zenity Error Dialog with a description of the error.
text - A description of the error.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--text=%s' % text]
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
run_zenity('--error', *args).wait()
def InfoMessage(text, **kwargs):
"""Show an info message dialog to the user.
This will raise a Zenity Info Dialog displaying some information.
text - The information to present to the user.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--text=%s' % text]
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
run_zenity('--info', *args).wait()
def Question(text, **kwargs):
"""Ask the user a question.
This will raise a Zenity Question Dialog that will present the user with an
OK/Cancel dialog box. It returns True if the user clicked OK; False on
Cancel.
text - The question to ask.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--text=%s' % text]
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
return run_zenity('--question', *args).wait() == 0
def Warning(text, **kwargs):
"""Show a warning message dialog to the user.
This will raise a Zenity Warning Dialog with a description of the warning.
It returns True if the user clicked OK; False on cancel.
text - A description of the warning.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--text=%s' % text]
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
return run_zenity('--warning', *args).wait() == 0
def Progress(text='', percentage=0, auto_close=False, pulsate=False, no_cancel=False, **kwargs):
"""Show a progress dialog to the user.
This will raise a Zenity Progress Dialog. It returns a callback that
accepts two arguments. The first is a numeric value of the percent
complete. The second is a message about the progress.
NOTE: This function sends the SIGHUP signal if the user hits the cancel
button. You must connect to this signal if you do not want your
application to exit.
text - The initial message about the progress.
percentage - The initial percentage to set the progress bar to.
auto_close - True if the dialog should close automatically if it reaches
100%.
    pulsate - True if the status should pulsate instead of progress.
    no_cancel - True to hide the cancel button.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
if text:
args.append('--text=%s' % text)
if percentage:
args.append('--percentage=%s' % percentage)
    if auto_close:
        # --auto-close, --no-cancel and --pulsate are flags that take no value
        args.append('--auto-close')
    if no_cancel:
        args.append('--no-cancel')
    if pulsate:
        args.append('--pulsate')
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = Popen([zen_exec, '--progress'] + args, stdin=PIPE, stdout=PIPE)
def update(percent, message=''):
        if isinstance(percent, float):
            percent = int(percent * 100)
        p.stdin.write(str(percent) + '\n')
        if message:
            p.stdin.write('# %s\n' % message)
        # without a flush Zenity may not see the update until the pipe fills
        p.stdin.flush()
        return p.returncode
return update
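# A minimal usage sketch for Progress (zenity must be installed; 'tasks' and
# 'do_work' are illustrative names):
#   update = Progress('Working...', auto_close=True)
#   for i, task in enumerate(tasks):
#       do_work(task)
#       update(int(100 * (i + 1) / len(tasks)), 'Finished %s' % task)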
def GetText(text='', entry_text='', password=False, **kwargs):
"""Get some text from the user.
This will raise a Zenity Text Entry Dialog. It returns the text the user
entered or None if the user hit cancel.
text - A description of the text to enter.
entry_text - The initial value of the text entry box.
password - True if text entered should be hidden by stars.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
if text:
args.append('--text=%s' % text)
if entry_text:
args.append('--entry-text=%s' % entry_text)
if password:
args.append('--hide-text')
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--entry', *args)
if p.wait() == 0:
return p.stdout.read()[:-1]
def TextInfo(filename=None, editable=False, html_support=False, **kwargs):
"""Show the text of a file to the user.
This will raise a Zenity Text Information Dialog presenting the user with
the contents of a file. It returns the contents of the text box.
filename - The path to the file to show.
    editable - True if the text should be editable.
    html_support - True to render the file as HTML (requires a Zenity build
        with --html support).
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
if filename:
args.append('--filename=%s' % filename)
if editable:
args.append('--editable')
    if html_support:
args.append('--html')
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--text-info', *args)
if p.wait() == 0:
return p.stdout.read()
| dleicht/PSB | PyZenity.py | Python | mit | 15,175 | ["Brian"] | 641d8dadc81f8c10d4ae3f6cd6afb99f83ff73c66741ffcbfde67c0c3c3d422f |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Run a query on the tables
"""
from gramps.gen.simple import SimpleAccess, SimpleDoc
try:
    from gramps.gui.plug.quick import QuickTable
except ImportError:
# For testing framework
class QuickTable(object):
def __init__(self, database):
self.db = database
self.data = []
self.column_labels = []
self.links = []
def rows(self, *cols, **kwargs):
self.data.append(cols)
self.links.append(kwargs.get("link", None))
def columns(self, *cols):
self.column_labels = cols[:]
def write(self, document):
pass
from gramps.gen.const import GRAMPS_LOCALE as glocale
try:
_trans = glocale.get_addon_translator(__file__)
except ValueError:
_trans = glocale.translation
_ = _trans.gettext
import gramps.gen.datehandler
import gramps.gen.lib
from gramps.gen.lib.handle import Handle
from gramps.gen.lib.primaryobj import PrimaryObject
from gramps.gen.lib.struct import Struct
import random
import re
import traceback
import itertools
import time
class Environment(dict):
"""
Environment class for providing a specialized env
so that eval(expr, env) will be able to find items
in the struct.
"""
def __init__(self, *args, **kwargs):
""" Initialize environment as a regular dict """
dict.__init__(self, *args, **kwargs)
def __getitem__(self, key):
"""
Try looking up the item in struct first, and if failing
look in self (a dict).
"""
try:
return self.struct[key]
        except Exception:
if key in self:
return dict.__getitem__(self, key)
else:
raise NameError("name '%s' is not defined" % key)
def set_struct(self, struct):
"""
Set the struct of the Environment.
"""
self.struct = struct
class DBI(object):
"""
The SQL-like interface to the database and document instances.
"""
def __init__(self, database, document=None):
self.database = database
self.document = document
self.data = {}
self.select = 0
self.flat = False
self.raw = False
if self.database:
for name in ('Person', 'Family', 'Event', 'Place', 'Repository',
'Source', 'Citation', 'Media', 'Note', 'Tag'):
d = self.database._tables[name]["class_func"]().to_struct()
self.data[name.lower()] = d.keys()
# The macros:
self.shortcuts = {
"SURNAME": "primary_name.surname_list[0].surname",
"GIVEN": "primary_name.first_name",
}
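        # e.g. "select SURNAME from person" is rewritten (before lexing) to
        # "select primary_name.surname_list[0].surname from person"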
def parse(self, query):
"""
Parse the query.
"""
self.query_text = query.replace("\n", " ").strip()
lexed = self.lexer(self.query_text)
#print(lexed)
self.parser(lexed)
for col_name in self.columns[:]: # copy
if col_name == "*":
self.columns.remove('*')
# this is useful to see what _class it is:
self.columns.extend(self.get_columns(self.table))
# otherwise remove metadata:
                #self.columns.extend([col for col in self.get_columns(self.table) if not col.startswith("_")])
def lexer(self, string):
"""
Given a string, break into a list of chunks.
"""
retval = []
state = None
current = ""
stack = []
i = 0
# Handle macros:
for key in self.shortcuts.keys():
string = string.replace(key, self.shortcuts[key])
# (some "expression" in ok)
# func(some "expression" in (ok))
# list[1][0]
# [x for x in list]
while i < len(string):
ch = string[i]
#print("lex:", i, ch, state, retval, current)
if state == "in-double-quote":
if ch == '"':
state = stack.pop()
current += ch
elif state == "in-single-quote":
if ch == "'":
state = stack.pop()
current += ch
elif state == "in-expr":
if ch == ")":
state = stack.pop()
elif ch == "(":
stack.append("in-expr")
current += ch
elif state == "in-square-bracket":
if ch == "]":
state = stack.pop()
elif ch == "[":
stack.append("in-square-bracket")
current += ch
elif ch == '"':
stack.append(state)
state = "in-double-quote"
current += ch
elif ch == "'":
stack.append(state)
state = "in-single-quote"
current += ch
elif ch == "(":
stack.append(state)
state = "in-expr"
current += "("
elif ch == "[":
stack.append(state)
state = "in-square-bracket"
current += "["
elif ch == ",":
if current:
retval.append(current)
retval.append(",")
current = ""
elif ch == "=":
if current:
retval.append(current)
retval.append("=")
current = ""
elif ch in [' ', '\t', '\n', ";"]: # break
if current:
retval.append(current)
if current.upper() == "WHERE":
# HACK: get rest of string:
if string[-1] == ";":
retval.append(string[i + 1:-1])
i = len(string) - 2
else:
retval.append(string[i + 1:])
i = len(string) - 1
current = ""
else:
pass # ignore whitespace
else:
current += ch
i += 1
if current:
retval.append(current)
#print("lexed:", retval)
return retval
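    # Example of lexer output (a sketch; the WHERE hack grabs the rest of the
    # query as a single chunk, after macro substitution):
    #   lexer('select SURNAME from person where "A" in SURNAME;')
    # returns roughly:
    #   ['select', 'primary_name.surname_list[0].surname', 'from', 'person',
    #    'where', '"A" in primary_name.surname_list[0].surname']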
def parser(self, lex):
"""
Takes output of lexer, and sets the values.
After parser, the DBI will be ready to process query.
"""
self.action = None
self.table = None
self.columns = []
self.setcolumns = []
self.values = []
self.aliases = {}
self.limit = None
self.where = None
self.index = 0
while self.index < len(lex):
symbol = lex[self.index]
if symbol.upper() == "FROM":
# from table select *;
self.index += 1
if self.index < len(lex):
self.table = lex[self.index]
elif symbol.upper() == "SELECT":
# select a, b from table;
self.action = "SELECT"
self.index += 1
self.columns.append(lex[self.index])
self.index += 1
while self.index < len(lex) and lex[self.index].upper() in [",", "AS"]:
sep = lex[self.index]
if sep == ",":
self.index += 1
self.columns.append(lex[self.index])
self.index += 1
elif sep.upper() == "AS":
self.index += 1 # alias
self.aliases[self.columns[-1]] = lex[self.index]
self.index += 1
self.index -= 1
elif symbol.upper() == "DELETE":
# delete from table where item == 1;
self.action = "DELETE"
self.columns = ["gramps_id"]
elif symbol.upper() == "SET":
# SET x=1, y=2
self.index += 1
self.setcolumns.append(lex[self.index]) # first column
self.index += 1 # equal sign
# =
self.index += 1 # value
self.values.append(lex[self.index])
self.index += 1 # comma
while self.index < len(lex) and lex[self.index] == ",":
self.index += 1 # next column
self.setcolumns.append(lex[self.index])
self.index += 1 # equal
# =
self.index += 1 # value
self.values.append(lex[self.index])
self.index += 1 # comma?
self.index -= 1
elif symbol.upper() == "LIMIT":
self.index += 1 # after LIMIT
number = lex[self.index]
self.index += 1 # maybe a comma
if self.index < len(lex) and lex[self.index] == ",":
self.index += 1 # after ","
stop = lex[self.index]
self.limit = (int(number), int(stop))
else:
self.limit = (0, int(number))
self.index -= 1
elif symbol.upper() == "WHERE":
# how can we get all of Python expressions?
# this assumes all by ;
self.index += 1
self.where = lex[self.index]
elif symbol.upper() == "UPDATE":
# update table set x=1, y=2 where condition;
# UPDATE gramps_id set tag_list = Tag("Betty") from person where "Betty" in primary_name.first_name
self.columns = ["gramps_id"]
self.action = "UPDATE"
if self.index < len(lex):
self.index += 1
self.table = lex[self.index]
elif symbol.upper() == "FLAT":
self.flat = True
elif symbol.upper() == "EXPAND":
self.flat = False
elif symbol.upper() == "RAW":
self.raw = True
elif symbol.upper() == "NORAW":
self.raw = False
else:
raise AttributeError("invalid SQL expression: '... %s ...'" % symbol)
self.index += 1
def close(self):
"""
Close up any progress widgets or dialogs.
"""
        # Nothing to do currently; if a progress dialog is ever added it
        # would be closed here (e.g. self.progress.close()).
        pass
def clean_titles(self, columns):
"""
Take the columns and turn them into strings for the table.
"""
retval = []
for column in columns:
if column in self.aliases:
column = self.aliases[column]
retval.append(column.replace("_", "__"))
return retval
def query(self, query):
self.parse(query)
self.select = 0
start_time = time.time()
        class Table(object):
            def __init__(self):
                # instance-level list; a class-level list would be shared
                # across all instances
                self.results = []
            def row(self, *args, **kwargs):
                self.results.append([args, kwargs])
            def get_rows(self):
                return [list(item[0]) for item in self.results]
table = Table()
self.sdb = SimpleAccess(self.database)
self.process_table(table) # a class that has .row(1, 2, 3, ...)
print(_("%d rows processed in %s seconds.\n") % (self.select, time.time() - start_time))
return table
def eval(self):
"""
Execute the query.
"""
self.sdb = SimpleAccess(self.database)
self.stab = QuickTable(self.sdb)
self.select = 0
start_time = time.time()
self.process_table(self.stab) # a class that has .row(1, 2, 3, ...)
if self.select > 0:
self.stab.columns(*self.clean_titles(self.columns))
self.sdoc = SimpleDoc(self.document)
self.sdoc.title(self.query_text)
self.sdoc.paragraph("\n")
self.sdoc.paragraph("%d rows processed in %s seconds.\n" % (self.select, time.time() - start_time))
self.stab.write(self.sdoc)
self.sdoc.paragraph("")
return _("%d rows processed in %s seconds.\n") % (self.select, time.time() - start_time)
def get_columns(self, table):
"""
Get the columns for the given table.
"""
if self.database:
retval = self.data[table.lower()]
return retval # [self.name] + retval
else:
return ["*"]
def process_table(self, table):
"""
Given a table name, process the query on the elements of the
table.
"""
# 'Person', 'Family', 'Source', 'Citation', 'Event', 'Media',
# 'Place', 'Repository', 'Note', 'Tag'
# table: a class that has .row(1, 2, 3, ...)
if self.table == "person":
self.do_query(self.sdb.all_people(), table)
elif self.table == "family":
self.do_query(self.sdb.all_families(), table)
elif self.table == "event":
self.do_query(self.sdb.all_events(), table)
elif self.table == "source":
self.do_query(self.sdb.all_sources(), table)
elif self.table == "tag":
self.do_query(self.sdb.all_tags(), table)
elif self.table == "citation":
self.do_query(self.sdb.all_citations(), table)
elif self.table == "media":
self.do_query(self.sdb.all_media(), table)
elif self.table == "place":
self.do_query(self.sdb.all_places(), table)
elif self.table == "repository":
self.do_query(self.sdb.all_repositories(), table)
elif self.table == "note":
self.do_query(self.sdb.all_notes(), table)
else:
raise AttributeError("no such table: '%s'" % self.table)
def get_tag(self, name):
tag = self.database.get_tag_from_name(name)
if tag is None:
tag = gramps.gen.lib.Tag()
tag.set_name(name)
trans_class = self.database.get_transaction_class()
with trans_class("QueryQuickview new Tag", self.database, batch=False) as trans:
self.database.add_tag(tag, trans)
return Handle("Tag", tag.handle)
def make_env(self, **kwargs):
"""
An environment with which to eval elements.
"""
        retval = Environment({
_("Date"): gramps.gen.lib.date.Date,
_("Today"): gramps.gen.lib.date.Today(),
"random": random,
"re": re,
"db": self.database,
"sdb": self.sdb,
"lib": gramps.gen.lib,
"_": _,
"Tag": self.get_tag,
})
retval.update(__builtins__)
retval.update(kwargs)
return retval
def stringify(self, value):
"""
Turn the value into an appropriate string representation.
"""
if self.raw:
return value
if isinstance(value, Struct):
return self.stringify(value.struct)
elif isinstance(value, (list, tuple)):
if len(value) == 0 and not self.flat:
return ""
elif len(value) == 1 and not self.flat:
return self.stringify(value[0])
else:
return "[%s]" % (", ".join(map(self.stringify, value)))
elif isinstance(value, PrimaryObject):
return value
else:
return str(value)
def clean(self, values, names):
"""
Given the values and names of columns, change the values
into string versions for the display table.
"""
if self.raw:
return values
retval = []
for i in range(len(values)):
if names[i].endswith("handle"):
retval.append(repr(values[i].struct["handle"]))
else:
retval.append(self.stringify(values[i]))
return retval
def do_query(self, items, table):
"""
Perform the query on the items in the named table.
"""
# table: a class that has .row(1, 2, 3, ...)
with self.database.get_transaction_class()("QueryQuickview", self.database, batch=True) as trans:
ROWNUM = 0
env = self.make_env()
for item in items:
if item is None:
continue
row = []
row_env = []
# "col[0]" in WHERE clause will return first column of selection:
env["col"] = row_env
env["ROWNUM"] = ROWNUM
env["object"] = item
struct = Struct(item.to_struct(), self.database)
env.set_struct(struct)
for col in self.columns:
try:
value = eval(col, env)
                    except Exception:
value = None
row.append(value)
# allow col[#] reference:
row_env.append(value)
# an alias?
if col in self.aliases:
env[self.aliases[col]] = value
# Should we include this row?
if self.where:
try:
result = eval(self.where, env)
                    except Exception:
continue
else:
if self.action in ["DELETE", "UPDATE"]:
result = True
else:
                        result = any([col is not None for col in row])  # keep the row if any value is set
# If result, then append the row
if result:
if (self.limit is None) or (self.limit[0] <= ROWNUM < self.limit[1]):
if self.action == "SELECT":
if not self.flat:
# Join by rows:
products = []
columns = []
count = 0
for col in row:
if ((isinstance(col, Struct) and isinstance(col.struct, list) and len(col.struct) > 0) or
(isinstance(col, list) and len(col) > 0)):
products.append(col)
columns.append(count)
count += 1
if len(products) > 0:
current = self.clean(row, self.columns)
for items in itertools.product(*products):
for i in range(len(items)):
current[columns[i]] = self.stringify(items[i])
table.row(*current, link=(item.__class__.__name__, item.handle))
self.select += 1
else:
table.row(*self.clean(row, self.columns), link=(item.__class__.__name__, item.handle))
self.select += 1
else:
table.row(*self.clean(row, self.columns), link=(item.__class__.__name__, item.handle))
self.select += 1
elif self.action == "UPDATE":
# update table set col=val, col=val where expr;
table.row(*self.clean(row, self.columns), link=(item.__class__.__name__, item.handle))
self.select += 1
for i in range(len(self.setcolumns)):
struct.setitem(self.setcolumns[i], eval(self.values[i], env), trans=trans)
elif self.action == "DELETE":
table.row(*self.clean(row, self.columns))
self.select += 1
self.database.remove_instance(item, trans)
else:
raise AttributeError("unknown command: '%s'", self.action)
ROWNUM += 1
if (self.limit is not None) and (ROWNUM >= self.limit[1]):
break
def run(database, document, query):
"""
Run the query
"""
retval = ""
dbi = DBI(database, document)
try:
        dbi.parse(query)
except AttributeError as msg:
return msg
try:
retval = dbi.eval()
except AttributeError as msg:
# dialog?
retval = msg
dbi.close()
return retval
| gramps-project/addons-source | Query/QueryQuickview.py | Python | gpl-2.0 | 21,665 | ["Brian"] | 30f2d638e73b8254ed678bd1b3baf91d501a5273234b73f0f596c3eb4e44b322 |
# -*- coding: utf-8 -*-#
import Tkinter
import logging
import sys, os
import webbrowser
# set basedir for testing this application
if '__file__' in globals():
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../..')
sys.path.append(basedir)
log_format = '%(name)s - %(levelname)s - %(filename)s - %(lineno)d - %(message)s'
logging.basicConfig(format=log_format, level=logging.NOTSET)
# import when running tkroopy
import src.modules.page as page
import src.modules.table as table
import src.modules.console as console
from contrib.pydal import DAL, Field
db = page.Page.db
log = logging.getLogger(__package__)
class Fibonacci_Numbers(page.Page):
"""
    Streams Fibonacci numbers to a threadsafe console widget from a worker
    thread, without locking the UI.
"""
production = False
def __init__(self, root, name, *args, **kwargs):
"""
Initiate the Application
        You can create widgets here but do not initialise any data. Loading data
should be done in the load method.
"""
page.Page.__init__(self, root, name)
# Displayed in the main menu
self.title = 'Fibonacci Numbers'
self.image_path = 'fibonacci_numbers.gif'
self.models()
# Enter any instance variables here.
@staticmethod
def models():
# Documentation on DAL
# http://www.web2py.com/books/default/chapter/29/06/the-database-abstraction-layer
pass
def load(self):
"""
Initiates the application when the button is clicked in the main menu
"""
page.Page.load(self)
# Remove any previous instances of application UI
try:
self.frame_main.pack_forget()
self.frame_main.destroy()
        except (AttributeError, Tkinter.TclError):
pass
## self.interior is the frame which sits inside the canvas to enable vertical scrolling
self.frame_main = Tkinter.Frame(self.interior)
self.frame_main.pack()
# Any code here will be run when the application is started from the main menu
self.console_fib = console.Console(self.frame_main, width=50, height=20)
self.console_fib.pack()
self.console_fib.write("This example uses fibonacci numbers to explain how the console widget is threadsafe and will not lock your application while outputting to it.\n\n")
self.console_fib.write("For more information about Fibonacci Numbers visit this ")
self.console_fib.write('link\n\n', self.console_fib.link.add(lambda : webbrowser.open_new('http://en.wikipedia.org/wiki/Fibonacci_number')))
def start():
import threading
t = threading.Thread(target=self.fibonacci_numbers)
t.daemon = True
t.start()
btn_start = Tkinter.Button(self.frame_main, text='Start', command=start)
btn_start.pack(anchor='w')
def fibonacci_numbers(self):
import time
def F():
a,b = 0,1
yield a
yield b
while True:
a, b = b, a + b
yield b
def SubFib(startNumber, endNumber):
for cur in F():
if cur > endNumber: return
if cur >= startNumber:
yield cur
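        # e.g. list(SubFib(0, 100)) == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]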
# clear text from console
self.console_fib.clear()
self.console_fib.write("This example uses fibonacci numbers to explain how the console widget is threadsafe and will not lock your application while outputting to it.\n\n")
self.console_fib.write("For more information about Fibonacci Numbers visit this ")
self.console_fib.write('link\n\n', self.console_fib.link.add(lambda : webbrowser.open_new('http://en.wikipedia.org/wiki/Fibonacci_number')))
        # output fibonacci numbers to console
for i in SubFib(0, 100):
time.sleep(0.5)
self.console_fib.write('%s\n' % i)
self.console_fib.write('Done!')
if __name__ == '__main__':
root = Tkinter.Tk()
main = Fibonacci_Numbers(root, "Fibonacci Numbers")
main.load()
main.pack(expand="true", fill="both")
root.mainloop()
exit()
# ------ END OF FILE ----
| tKroopy/tkroopy | src/applications/examples/fibonacci_numbers.py | Python | gpl-3.0 | 4,165 | ["VisIt"] | cf28f0bfe450eda68d63c5a372178330ea46b6213bbbf40e12a6a1bee259e0de |
from datetime import timedelta
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.utils import timezone
import factory
from . import models
UserModel = get_user_model()
class UserFactory(factory.django.DjangoModelFactory):
email = factory.Sequence(lambda n: 'email_{0}@gmail.com'.format(n))
first_name = factory.Sequence(lambda n: 'first_name_{0}'.format(n))
last_name = factory.Sequence(lambda n: 'last_name_{0}'.format(n))
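    # the next two fields reuse the first_name declaration, so each user's
    # password and cell_phone match their generated first_name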
password = first_name
cell_phone = first_name
cell_phone_is_valid = True
class Meta:
model = UserModel
@classmethod
def _create(cls, model_class, *args, **kwargs):
"""Override the default ``_create`` with our custom call."""
manager = cls._get_manager(model_class)
# The default would use ``manager.create(*args, **kwargs)``
return manager.create_user(*args, **kwargs)
class SiteFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Site
name = factory.Sequence(lambda n: 'name_{0}'.format(n))
description = factory.Sequence(lambda n: 'description_{0}'.format(n))
address = factory.Sequence(lambda n: 'address_{0}'.format(n))
class GroupFactory(factory.django.DjangoModelFactory):
class Meta:
model = Group
@factory.post_generation
def groups(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
self.user_set.add(UserFactory())
class StaffRoleFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.StaffRole
class ActivityTypeFactory(factory.django.DjangoModelFactory):
default_role = factory.SubFactory(StaffRoleFactory)
class Meta:
model = models.ActivityType
class CourtFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Court
site = factory.SubFactory(SiteFactory)
description = factory.Sequence(lambda n: 'description_{0}'.format(n))
admin_group = factory.SubFactory(GroupFactory)
activity_type = factory.SubFactory(ActivityTypeFactory)
class EventFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Event
preliminary_price = factory.Sequence(lambda n: n * 10)
start_at = factory.Sequence(lambda n: timezone.now() + timedelta(days=n))
description = factory.Sequence(lambda n: 'description_{0}'.format(n))
court = factory.SubFactory(CourtFactory)
class VisitFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Visit
user = factory.SubFactory(UserFactory)
event = factory.SubFactory(EventFactory)
class ApplicationFactory(factory.django.DjangoModelFactory):
comment = factory.Sequence(lambda n: 'comment_{0}'.format(n))
user = factory.SubFactory(UserFactory)
event = factory.SubFactory(EventFactory)
status = models.ApplicationStatuses.ACTIVE
class Meta:
model = models.Application
class ProposalFactory(factory.django.DjangoModelFactory):
comment = factory.Sequence(lambda n: 'comment_{0}'.format(n))
user = factory.SubFactory(UserFactory)
event = factory.SubFactory(EventFactory)
status = models.ProposalStatuses.ACTIVE
class Meta:
model = models.Proposal
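# A minimal usage sketch (assumes a configured Django test database):
#   user = UserFactory()              # email_0@gmail.com, first_name_0, ...
#   visit = VisitFactory(user=user)   # SubFactory chain builds Event/Court/Site too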
| oleg-chubin/let_me_play | let_me_app/factories.py | Python | apache-2.0 | 3,328 | ["VisIt"] | ca2635922f289851d63ce3638876a4ff261698f7cc87e4b69ec1e822bb6bcd54 |
from queue import Queue, Empty
import jack
import time
from PyQt5.QtGui import QImage, QColor
class midifeedback:
NOTEON = 0x9
NOTEOFF = 0x8
    MIDICTRL = 0xB  # MIDI control-change status nibble
NOTE_NAME = ['C', 'C#',
'D', 'D#',
'E',
'F', 'F#',
'G', 'G#',
'A', 'A#',
'B']
GREEN = 60
RED = 15
AMBER = 63
BLACK = 12
def __init__(self):
self.queue_out = Queue()
self.client = jack.Client("Launchpad Midi Feedback")
self.midi_out = self.client.midi_outports.register("output")
self.client.set_process_callback(self.my_callback)
self.client.activate()
def my_callback(self, frames):
self.midi_out.clear_buffer()
try:
i = 1
while True:
note = self.queue_out.get(block=False)
self.midi_out.write_midi_event(i, note)
i += 1
except Empty:
pass
return jack.CALL_AGAIN
def blackall(self):
for pitch in range(128):
note = ((self.NOTEON << 4) + 0, pitch, 12)
self.queue_out.put(note)
def closeall(self):
self.client.deactivate()
self.client.close()
def onenote(self, pitchx, pitchy, color=GREEN):
pitch = pitchx + (pitchy * 16)
note = ((self.NOTEON << 4) + 0, pitch, color)
self.queue_out.put(note)
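    # Pad (x, y) maps to MIDI pitch x + 16*y (classic Launchpad layout),
    # e.g. onenote(2, 3) lights the pad at pitch 50.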
def drawImg(self, path):
img = QImage(path)
for x in range(img.width()):
for y in range(img.height()):
red, green, blue, a = QColor(img.pixel(x, y)).getRgb()
if red > green and red > blue:
color = midifeedback.RED
elif green > red and green > blue:
color = midifeedback.GREEN
elif red == green and green > blue:
color = midifeedback.AMBER
else:
color = midifeedback.BLACK
print(
"r:{},g:{},b:{},color:{}".format(red, green, blue, color))
self.onenote(x, y, color)
#time.sleep(0.03)
def lightFancy(self):
color = 60
fancylist = [19, 18, 33, 50, 67, 82, 81, 36, 37, 54, 69, 86, 101, 100,
84, 68, 52]
for pitch in fancylist:
note = ((self.NOTEON << 4) + 0, pitch, color)
self.queue_out.put(note)
def blink(self):
for pitch in range(128):
note = ((self.NOTEON << 4) + 0, pitch, 60)
self.queue_out.put(note)
time.sleep(0.1)
note = ((self.NOTEON << 4) + 0, pitch, 12)
self.queue_out.put(note)
def soft_blink(self, channel, pitch, blink_time, blink_color):
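        # Note: this loops forever; run it in its own thread if the caller
        # needs to continue.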
while True:
note = ((self.NOTEON << 4) + channel, pitch, blink_color)
self.queue_out.put(note)
time.sleep(blink_time)
note = ((self.NOTEON << 4) + channel, pitch, self.BLACK)
self.queue_out.put(note)
time.sleep(blink_time)
def tetris(self):
color = 60
fancylist = [(2, 0), (3, 0), (4, 0), (2, -1)]
for row in range(8):
l = [(x, y + row) for x, y in fancylist]
for x, y in l:
if min(x, y) >= 0:
# print("x%s y%s" % (x, y))
self.onenote(x, y)
time.sleep(1)
self.blackall()
# use:
# import midifeedback
# test = midifeedback.midifeedback()
# test.lightFancy()
# test.blackall()
# test.closeall()
# reload(midifeedback)
# test.onenote(x,y)
# test.onenote(2,2,midifeedback.midifeedback.RED)
# test.soft_blink(0, 32, 0.25, midifeedback.midifeedback().GREEN)
| sonejostudios/midifeedback | midifeedback.py | Python | gpl-2.0 | 3,894 | ["Amber"] | a77a27b934185fe5018caf34fd85125e60bd6b29cab0d087d464a27fdbb223ba |
#!/usr/bin/env python
"""
Reorder the atoms in the Angles section of a data file to make sure that
atoms have a "canonical order" (for example, that the first atom has a lower
id than the last atom in angle and dihedral interactions).
(This helps us detect potential problems like duplicate Angle interactions.)
"""
import sys
from operator import itemgetter
g_program_name = __file__.split('/')[-1]
in_stream = sys.stdin
section_name = ''
if len(sys.argv) == 2:
section_name = sys.argv[1]
else:
sys.stderr.write('Usage Example:\n\n'
' '+g_program_name+' Angles < angles.txt > new_angles.txt\n\n'
' In this example \"angles.txt\" contains only the \"Angles\" section of\n'
' a LAMMPS DATA file. (Either a text-editor, or the \n'
' \"extract_lammps_data.py\" script can be used to select a section from\n'
' a LAMMPS DATA file\n\n'
'Error('+g_program_name+'): expected exactly one argument:\n'
' \"Angles\", \"Dihedrals\", or \"Impropers\"\n')
exit(-1)
# Ordering rules are defined in a separate module named
# nbody_Angles.py, nbody_Dihedrals.py, nbody_Impropers.py
# Load that now.
module_name = 'nbody_'+section_name
g = __import__(module_name) #defines g.bond_pattern, g.canonical_order
# This module defines the graph representing the bond pattern for this type
# of interaction. (The number of vertices and edges for the graph corresponds
# to the number of atoms and bonds in this type of interaction.)
natoms = g.bond_pattern.GetNumVerts()
nbonds = g.bond_pattern.GetNumEdges()
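# For example (a sketch of the intended behaviour), with the "Angles" module
# an entry like "7 3 15 9 11" (angle-id type atom1 atom2 atom3) is rewritten
# so the first atom id is not larger than the last: "7 3 11 9 15".
# The exact rules come from the imported nbody_* module.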
for line_orig in in_stream:
line = line_orig.rstrip('\n')
comment = ''
if '#' in line_orig:
ic = line.find('#')
line = line_orig[:ic]
comment = ' '+line_orig[ic:].rstrip('\n')
tokens = line.strip().split()
swapped = False
if len(tokens) == 2+natoms:
all_integers = True
abids_l = [[0 for i in range(0, natoms)],
[0 for i in range(0, nbonds)]]
for i in range(0, natoms):
if not tokens[2+i].isdigit():
all_integers = False
if all_integers:
for i in range(0, natoms):
abids_l[0][i] = int(tokens[2+i])
else:
for i in range(0, natoms):
abids_l[0][i] = tokens[2+i]
abids = g.canonical_order( (tuple(abids_l[0]), tuple(abids_l[1])) )
for i in range(0, natoms):
tokens[2+i] = str(abids[0][i])
sys.stdout.write(' '.join(tokens)+comment+'\n')
| anshumang/lammps-analytics | tools/moltemplate/src/nbody_reorder_atoms.py | Python | gpl-2.0 | 2,656 | ["LAMMPS"] | 38ba8c1f20d0d1529c3d977f636be32d0b9348cf1624491f284b47922393dd53 |
# -*- coding: utf-8 -*-
"""
This code combines two LAMMPS data files, with clay.data as the second file whose indices are modified.
The clay.data file should first have redundant connections removed, i.e. after using removeRedundantTopo.py.
Currently supports CLASS I FF for the first file, i.e. no bb, ba, aa, etc.
Future work: write this code in OOP style, because we actually use two data files (regarded as one class).
"""
# hyper parameter
dirName = r"F:\simulations\asphaltenes\kaolinite\cvff-clayff/"
firstDataFileName = "vo-kaolinite.data"
secondDataFileName = "generated.data"
class Data:
def __init__(self, dirName, fileName):
self.dirName = dirName
self.fileName = fileName
with open(dirName + fileName, 'r') as foo:
self.dataFile = foo.readlines()
# print self.dataFile[0:100]
        self.emptyLines = []
        # build new lists instead of popping while iterating, which would
        # skip the line after every removed one
        self.dataFile = [line for line in self.dataFile
                         if not line.startswith(('L', 'A', 'B', 'D', 'I', 'P', 'M'))]
        # print self.dataFile[0:100]
        deduped = []
        for i, line in enumerate(self.dataFile):
            if line == '\n' and i > 0 and self.dataFile[i - 1] == '\n':
                continue
            deduped.append(line)
        self.dataFile = deduped
# divide file in to different sections by empty lines
for i, line in enumerate(self.dataFile):
if line == '\n':
self.emptyLines.append(i)
self.snumber = self.dataFile[:self.emptyLines[0]]
try:
self.stype, self.spbc, self.smass, self.spaircoeff, self.sbondcoeff, self.sanglecoeff, self.sdihedralcoeff,\
self.simpropercoeff, self.satom, self.sbond, self.sangle, self.sdihedral, self.simproper = \
(self.dataFile[self.emptyLines[i]+1:self.emptyLines[i+1]] for i in range(len(self.emptyLines)-1))
except ValueError:
print "No dihedral and improper sections in this file."
self.stype, self.spbc, self.smass, self.spaircoeff, self.sbondcoeff, self.sanglecoeff,\
self.satom, self.sbond, self.sangle = \
(self.dataFile[self.emptyLines[i]+1:self.emptyLines[i+1]] for i in range(len(self.emptyLines)-1))
# deal with different sections
# snumber, stype
self.nAtoms, self.nBonds, self.nAngles, self.nDihedrals, self.nImpropers = (int(line.split()[0]) for line in self.snumber)
print self.nAtoms, self.nBonds, self.nAngles, self.nDihedrals, self.nImpropers
self.kAtoms, self.kBonds, self.kAngles, self.kDihedrals, self.kImpropers = (int(line.split()[0]) for line in self.stype)
print self.kAtoms, self.kBonds, self.kAngles, self.kDihedrals, self.kImpropers
#spbc smass, spaircoeff do not need further formatting
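# Index-shifting rule for the second file (a sketch of what follows): every id
# is offset by the matching count from the first file, e.g. a bond line
# "id type a1 a2" becomes "id+nBonds type+kBonds a1+nAtoms a2+nAtoms".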
first = Data(dirName, firstDataFileName)
second = Data(dirName, secondDataFileName)
print first.sbondcoeff
#deal with the index in the second data file
# smass
smassnew = []
for line in second.smass:
bar = line.split()
print bar
bar[0] = int(bar[0]) + first.kAtoms
bar[0] = str(bar[0])
smassnew.append(" ".join(bar))
print smassnew
# spaircoeff
spaircoeffnew = []
for line in second.spaircoeff:
bar = line.split()
bar[0] = int(bar[0]) + first.kAtoms
bar[0] = str(bar[0])
spaircoeffnew.append(" ".join(bar))
# sbondcoeff
sbondcoeffnew = []
for line in second.sbondcoeff:
bar = line.split()
bar[0] = int(bar[0]) + first.kBonds
bar[0] = str(bar[0])
sbondcoeffnew.append(" ".join(bar))
# sanglecoeff
sanglecoeffnew = []
for line in second.sanglecoeff:
bar = line.split()
bar[0] = int(bar[0]) + first.kAngles
bar[0] = str(bar[0])
sanglecoeffnew.append(" ".join(bar))
# satoms
satomnew = []
for line in second.satom:
bar = line.split()
bar[0] = int(bar[0]) + first.nAtoms
bar[0] = str(bar[0])
bar[2] = int(bar[2]) + first.kAtoms
bar[2] = str(bar[2])
satomnew.append(" ".join(bar))
# sbonds
sbondnew = []
for line in second.sbond:
bar = line.split()
bar[0] = int(bar[0]) + first.nBonds
bar[0] = str(bar[0])
bar[1] = int(bar[1]) + first.kBonds
bar[1] = str(bar[1])
bar[2] = int(bar[2]) + first.nAtoms
bar[2] = str(bar[2])
bar[3] = int(bar[3]) + first.nAtoms
bar[3] = str(bar[3])
sbondnew.append(" ".join(bar))
# sangles
sanglenew = []
for line in second.sangle:
bar = line.split()
bar[0] = int(bar[0]) + first.nAngles
bar[0] = str(bar[0])
bar[1] = int(bar[1]) + first.kAngles
bar[1] = str(bar[1])
bar[2] = int(bar[2]) + first.nAtoms
bar[2] = str(bar[2])
bar[3] = int(bar[3]) + first.nAtoms
bar[3] = str(bar[3])
bar[4] = int(bar[4]) + first.nAtoms
bar[4] = str(bar[4])
sanglenew.append(" ".join(bar))
#combine two files
with open(first.dirName + 'combined.data', 'w') as foo:
print >> foo, "Lammps date file generated with riddle's code from %s and %s\n"%(firstDataFileName, secondDataFileName)
print >> foo, " %d atoms\n %d bonds\n %d angles\n %d dihedrals\n %d impropers\n"\
%(first.nAtoms+second.nAtoms, first.nBonds+second.nBonds, first.nAngles+second.nAngles, first.nDihedrals+second.nDihedrals, first.nImpropers+second.nImpropers)
print >> foo, " %d atom types\n %d bond types\n %d angle types\n %d dihedral types\n %d improper types\n"\
%(first.kAtoms+second.kAtoms, first.kBonds+second.kBonds, first.kAngles+second.kAngles, first.kDihedrals+second.kDihedrals, first.kImpropers+second.kImpropers)
for line in first.spbc:
print >> foo, line[:-1]
print >> foo, "\nMasses\n"
for line in first.smass:
print >> foo, line[:-1]
for line in smassnew:
print >> foo, ' '+ line
print >> foo, "\nPair Coeffs # lj/cut/coul/long\n"
for line in first.spaircoeff:
print >> foo, line[:-1]
for line in spaircoeffnew:
print >> foo, ' '+ line
print >> foo, "\nBond Coeffs # harmonic\n"
for line in first.sbondcoeff:
print >> foo, ' '+ line[:-1]
for line in sbondcoeffnew:
print >> foo, ' '+ line
print >> foo, "\nAngle Coeffs # harmonic\n"
for line in first.sanglecoeff:
print >> foo, ' '+ line[:-1]
for line in sanglecoeffnew:
print >> foo, ' '+ line
print >> foo, "\nDihedral Coeffs # harmonic\n"
for line in first.sdihedralcoeff:
print >> foo, line[:-1]
print >> foo, "\nImproper Coeffs # cvff\n"
for line in first.simpropercoeff:
print >> foo, line[:-1]
print >> foo, "\nAtoms\n"
for line in first.satom:
print >> foo, line[:-1]
for line in satomnew:
print >> foo, ' '+ line
print >> foo, "\nBonds\n"
for line in first.sbond:
print >> foo, line[:-1]
for line in sbondnew:
print >> foo, ' '+ line
print >> foo, "\nAngles\n"
for line in first.sangle:
print >> foo, line[:-1]
for line in sanglenew:
print >> foo, ' '+ line
print >> foo, "\nDihedrals\n"
for line in first.sdihedral:
print >> foo, line[:-1]
print >> foo, "\nImpropers\n"
for line in first.simproper:
print >> foo, line[:-1]
# this last \n can not be deleted, otherwise the generated file cannot be re-processed \
# by this code, which is useful when combine two data file into one.
print >> foo, '\n'
| riddlezyc/geolab | src/io/combineTwofile.py | Python | gpl-3.0 | 7,532 | ["LAMMPS"] | 0b034fcab0a287e4d8c39b4ec43f130d3fcbb3d591f91afd9353a1eebb275b31 |
"""
Rendering utils
@author: Chris Scott
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import ctypes as C
import logging
import numpy as np
import vtk
from vtk.util import numpy_support
from six.moves import range
################################################################################
class NumpyVTKData(object):
"""
Hold paired numpy and VTK data.
"""
def __init__(self, data_numpy, name=None):
self._numpy = data_numpy
self._vtk = numpy_support.numpy_to_vtk(data_numpy)
if name is not None:
self._vtk.SetName(name)
def getVTK(self):
"""Return VTK array."""
return self._vtk
def getNumpy(self):
"""Return Numpy array."""
return self._numpy
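# Usage sketch: keep the numpy and VTK views of one buffer together
# (numpy_support.numpy_to_vtk shares memory by default, so edits to the
# numpy array are visible through the VTK array):
#   pos = NumpyVTKData(np.zeros(30, np.float64), name="positions")
#   pos.getVTK()    # vtkDoubleArray for VTK pipelines
#   pos.getNumpy()  # the backing numpy array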
################################################################################
class ActorObject(object):
"""
Holds a VTK actor and a boolean to say whether it is loaded
"""
def __init__(self, actor):
self.actor = actor
self.visible = False
################################################################################
def getScalarsType(colouringOptions):
"""
Return scalars type based on colouring options
"""
# scalar type
if colouringOptions.colourBy == "Species" or colouringOptions.colourBy == "Solid colour":
scalarType = 0
elif colouringOptions.colourBy == "Height":
scalarType = 1
elif colouringOptions.colourBy == "Charge":
scalarType = 4
else:
scalarType = 5
return scalarType
################################################################################
# call back class
class RGBCallBackClass(object):
CFUNCTYPE = C.CFUNCTYPE(C.c_long, C.c_int)
def __init__(self, lut):
self._lut = lut
self._rgb = np.empty(3, np.float64)
def __call__(self, scalar):
"""
Calculate RGB
"""
# rgb array
self._rgb = np.empty(3, np.float64)
# colour
self._lut.GetColor(scalar, self._rgb)
print("RGB CALLBACK; scalar %f; rgb %r" % (scalar, self._rgb))
return self._rgb.ctypes.data_as(C.c_void_p).value
def getcfunc(self):
return self.CFUNCTYPE(self)
cfunc = property(getcfunc)
################################################################################
def makeScalarBar(lut, colouringOptions, text_colour, renderingPrefs):
"""
Make a scalar bar
"""
scalarBar = vtk.vtkScalarBarActor()
scalarBar.SetLookupTable(lut)
if colouringOptions.colourBy == "Height":
title = colouringOptions.scalarBarText
elif colouringOptions.colourBy == "Charge":
title = str(colouringOptions.scalarBarTextEdit3.text())
else:
title = str(colouringOptions.scalarBarTexts[colouringOptions.colourBy].text())
scalarBar.SetTitle(title)
scalarBar.SetOrientationToHorizontal()
scalarBar.SetNumberOfLabels(renderingPrefs.numScalarBarLabels)
if renderingPrefs.enableFmtScalarBarLabels:
scalarBar.SetLabelFormat(renderingPrefs.fmtScalarBarLabels)
lprop = scalarBar.GetTitleTextProperty()
lprop.SetColor(text_colour)
lprop.ItalicOff()
lprop.BoldOn()
lprop.SetFontSize(20)
lprop.SetFontFamilyToArial()
lprop = scalarBar.GetLabelTextProperty()
lprop.SetColor(text_colour)
lprop.ItalicOff()
lprop.BoldOn()
lprop.SetFontSize(10)
lprop.SetFontFamilyToArial()
scalarBar.SetWidth(0.85)
scalarBar.GetPositionCoordinate().SetValue(0.1, 0.01)
scalarBar.SetHeight(0.12)
return scalarBar
################################################################################
def setMapperScalarRange(mapper, colouringOptions, NSpecies):
"""
Set scalar range on mapper
"""
if colouringOptions.colourBy == "Species" or colouringOptions.colourBy == "Solid colour":
mapper.SetScalarRange(0, NSpecies - 1)
elif colouringOptions.colourBy == "Height":
mapper.SetScalarRange(colouringOptions.minVal, colouringOptions.maxVal)
elif colouringOptions.colourBy == "Charge":
mapper.SetScalarRange(colouringOptions.chargeMinSpin.value(), colouringOptions.chargeMaxSpin.value())
else:
mapper.SetScalarRange(colouringOptions.scalarMinSpins[colouringOptions.colourBy].value(),
colouringOptions.scalarMaxSpins[colouringOptions.colourBy].value())
################################################################################
def getScalar(colouringOptions, lattice, atomIndex, scalarVal=None):
"""
Return the correct scalar value for using with LUT
"""
if colouringOptions.colourBy == "Species" or colouringOptions.colourBy == "Solid colour":
scalar = lattice.specie[atomIndex]
elif colouringOptions.colourBy == "Height":
scalar = lattice.pos[3*atomIndex+colouringOptions.heightAxis]
elif colouringOptions.colourBy == "Charge":
scalar = lattice.charge[atomIndex]
elif scalarVal is not None:
scalar = scalarVal
else:
scalar = lattice.specie[atomIndex]
return scalar
################################################################################
def setupLUT(specieList, specieRGB, colouringOptions):
"""
Setup the colour look up table
"""
logger = logging.getLogger(__name__)
logger.debug("Setting up LUT")
lut = vtk.vtkLookupTable()
if colouringOptions.colourBy == "Species" or colouringOptions.colourBy == "Solid colour":
NSpecies = len(specieList)
lut.SetNumberOfColors(NSpecies)
lut.SetNumberOfTableValues(NSpecies)
lut.SetTableRange(0, NSpecies - 1)
lut.SetRange(0, NSpecies - 1)
for i in range(NSpecies):
if colouringOptions.colourBy == "Species":
lut.SetTableValue(i, specieRGB[i][0], specieRGB[i][1], specieRGB[i][2], 1.0)
elif colouringOptions.colourBy == "Solid colour":
lut.SetTableValue(i, colouringOptions.solidColourRGB[0], colouringOptions.solidColourRGB[1], colouringOptions.solidColourRGB[2])
elif colouringOptions.colourBy == "Height":
lut.SetNumberOfColors(1024)
lut.SetHueRange(0.667,0.0)
lut.SetRange(colouringOptions.minVal, colouringOptions.maxVal)
lut.SetRampToLinear()
lut.Build()
elif colouringOptions.colourBy == "Charge":
lut.SetNumberOfColors(1024)
lut.SetHueRange(0.667,0.0)
lut.SetRange(colouringOptions.chargeMinSpin.value(), colouringOptions.chargeMaxSpin.value())
lut.SetRampToLinear()
lut.Build()
else:
lut.SetNumberOfColors(1024)
lut.SetHueRange(0.667,0.0)
lut.SetRange(colouringOptions.scalarMinSpins[colouringOptions.colourBy].value(),
colouringOptions.scalarMaxSpins[colouringOptions.colourBy].value())
lut.SetRampToLinear()
lut.Build()
return lut
################################################################################
def setRes(num, displayOptions):
#res = 15.84 * (0.99999**natoms)
#if(LowResVar.get()=="LowResOff"):
    if num == 0:
res = 100
else:
# #if(ResVar.get()=="LowResOn"):
# #
# # res = -1.0361*math.log(num,e) + 14.051
# # #res = round(res,0)
# # #res = 176*(num**-0.36)
# # res = int(res)
# #
# #elif(ResVar.get()=="HighResOn"):
# #
# # res = -2.91*math.log(num,e) + 35
# # res = round(res,0)
# # res = 370*(num**-0.36)
# # res = int(res)
# #
# #else:
#
# res = -2.91*math.log(num,2.7) + 35
# res = round(res,0)
# res = 170*(num**-0.36)
# res = int(res)
res = int(displayOptions.resA * num ** (-displayOptions.resB))
logger = logging.getLogger(__name__)
logger.debug("Setting sphere resolution (N = %d): %d", num, res)
return res
| chrisdjscott/Atoman | atoman/rendering/utils.py | Python | mit | 8,308 | ["VTK"] | 314f1baef94baa65a4cb3930deaa6210a718d87bd05f93a5529f7685be06d25f |
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = 'Brian Wickman'
import os
import errno
import time
def _tail_lines(fd, linesback=10):
if fd is None:
return
# Contributed to Python Cookbook by Ed Pascoe (2003)
avgcharsperline = 75
while True:
try:
fd.seek(int(-1 * avgcharsperline * linesback), 2)
except IOError:
fd.seek(0)
atstart = fd.tell() == 0
lines = fd.read().splitlines()
if atstart or len(lines) > (linesback + 1):
break
avgcharsperline = avgcharsperline * 1.3
if len(lines) > linesback:
start = len(lines) - linesback - 1
else:
start = 0
return lines[start:start+linesback]
def wait_until_opened(filename, forever=True, clock=time):
while True:
try:
return open(filename, 'r')
        except (IOError, OSError) as e:
if e.errno == errno.ENOENT:
if forever:
clock.sleep(1)
else:
return None
else:
raise
def tail(filename, lines=10):
with open(filename, 'r') as fp:
for line in _tail_lines(fp, lines):
yield line
def tail_f(filename, forever=True, include_last=False, clock=time):
fd = wait_until_opened(filename, forever, clock)
    # wind back to near the end of the file (this also seeks fd to EOF)...
    last_lines = _tail_lines(fd, 10)
    if not include_last:
        # honour include_last: skip the pre-existing tail lines
        last_lines = []
while True:
if fd is None:
return
where = fd.tell()
if last_lines:
yield last_lines.pop(0)
continue
else:
line = fd.readline()
if line:
yield line
else:
# check health of the file descriptor.
fd_results = os.fstat(fd.fileno())
try:
st_results = None
st_results = os.stat(filename)
except OSError as e:
if e.errno == errno.ENOENT:
fd = wait_until_opened(filename, forever, clock)
continue
else:
raise
# file changed from underneath us, reopen
if fd_results.st_ino != st_results.st_ino:
fd.close()
fd = wait_until_opened(filename, forever, clock)
continue
if st_results.st_size < where:
# file truncated, rewind
fd.seek(0)
else:
# our buffer has not yet caught up, wait.
clock.sleep(1)
fd.seek(where)
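# Usage sketch (path is illustrative):
#   for line in tail_f('/var/log/app.log'):
#       handle(line)   # 'handle' is a placeholder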
| foursquare/commons-old | src/python/twitter/common/dirutil/tail.py | Python | apache-2.0 | 3,069 | ["Brian"] | c4de150a8e26a97da4304e70a8058ad9e2a45f8624cf458b34e5fa734912b074 |
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
import tarfile
from subprocess import Popen, PIPE
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.six import string_types
from ansible.playbook.role.definition import RoleDefinition
__all__ = ['RoleRequirement']
VALID_SPEC_KEYS = [
'name',
'role',
'scm',
'src',
'version',
]
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class RoleRequirement(RoleDefinition):
"""
Helper class for Galaxy, which is used to parse both dependencies
specified in meta/main.yml and requirements.yml files.
"""
def __init__(self):
pass
@staticmethod
def repo_url_to_role_name(repo_url):
        # gets the role name out of a repo like
        # "http://git.example.com/repos/repo.git" => "repo"
if '://' not in repo_url and '@' not in repo_url:
return repo_url
trailing_path = repo_url.split('/')[-1]
if trailing_path.endswith('.git'):
trailing_path = trailing_path[:-4]
if trailing_path.endswith('.tar.gz'):
trailing_path = trailing_path[:-7]
if ',' in trailing_path:
trailing_path = trailing_path.split(',')[0]
return trailing_path
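    # Illustrative mappings for the method above (hypothetical URLs):
    #   "https://git.example.com/acme/nginx-role.git" -> "nginx-role"
    #   "git@git.example.com:acme/nginx-role.git"     -> "nginx-role"
    #   "https://example.com/roles/webapp.tar.gz"     -> "webapp"
    #   "plain_role_name"                             -> "plain_role_name"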
@staticmethod
def role_yaml_parse(role):
if isinstance(role, string_types):
name = None
scm = None
src = None
version = None
if ',' in role:
if role.count(',') == 1:
(src, version) = role.strip().split(',', 1)
elif role.count(',') == 2:
(src, version, name) = role.strip().split(',', 2)
else:
raise AnsibleError("Invalid role line (%s). Proper format is 'role_name[,version[,name]]'" % role)
else:
src = role
if name is None:
name = RoleRequirement.repo_url_to_role_name(src)
if '+' in src:
(scm, src) = src.split('+', 1)
return dict(name=name, src=src, scm=scm, version=version)
if 'role' in role:
name = role['role']
if ',' in name:
raise AnsibleError("Invalid old style role requirement: %s" % name)
else:
del role['role']
role['name'] = name
else:
role = role.copy()
        if 'src' in role:
# New style: { src: 'galaxy.role,version,name', other_vars: "here" }
if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
role["src"] = "git+" + role["src"]
if '+' in role["src"]:
(scm, src) = role["src"].split('+')
role["scm"] = scm
role["src"] = src
if 'name' not in role:
role["name"] = RoleRequirement.repo_url_to_role_name(role["src"])
if 'version' not in role:
role['version'] = ''
if 'scm' not in role:
role['scm'] = None
for key in list(role.keys()):
if key not in VALID_SPEC_KEYS:
role.pop(key)
return role
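    # Example (a sketch, hypothetical URL) of the comma-separated string
    # form handled above:
    #   role_yaml_parse("git+https://git.example.com/acme/nginx-role.git,v1.2")
    #   => {'name': 'nginx-role', 'scm': 'git',
    #       'src': 'https://git.example.com/acme/nginx-role.git',
    #       'version': 'v1.2'}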
@staticmethod
def scm_archive_role(src, scm='git', name=None, version='HEAD', keep_scm_meta=False):
def run_scm_cmd(cmd, tempdir):
try:
stdout = ''
stderr = ''
popen = Popen(cmd, cwd=tempdir, stdout=PIPE, stderr=PIPE)
stdout, stderr = popen.communicate()
except Exception as e:
ran = " ".join(cmd)
display.debug("ran %s:" % ran)
display.debug("\tstdout: " + stdout)
display.debug("\tstderr: " + stderr)
raise AnsibleError("when executing %s: %s" % (ran, to_native(e)))
if popen.returncode != 0:
raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(cmd), tempdir, popen.returncode))
if scm not in ['hg', 'git']:
raise AnsibleError("- scm %s is not currently supported" % scm)
try:
scm_path = get_bin_path(scm)
except (ValueError, OSError, IOError):
raise AnsibleError("could not find/use %s, it is required to continue with installing %s" % (scm, src))
tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
clone_cmd = [scm_path, 'clone', src, name]
run_scm_cmd(clone_cmd, tempdir)
if scm == 'git' and version:
checkout_cmd = [scm_path, 'checkout', version]
run_scm_cmd(checkout_cmd, os.path.join(tempdir, name))
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar', dir=C.DEFAULT_LOCAL_TMP)
archive_cmd = None
if keep_scm_meta:
display.vvv('tarring %s from %s to %s' % (name, tempdir, temp_file.name))
with tarfile.open(temp_file.name, "w") as tar:
tar.add(os.path.join(tempdir, name), arcname=name)
elif scm == 'hg':
archive_cmd = [scm_path, 'archive', '--prefix', "%s/" % name]
if version:
archive_cmd.extend(['-r', version])
archive_cmd.append(temp_file.name)
elif scm == 'git':
archive_cmd = [scm_path, 'archive', '--prefix=%s/' % name, '--output=%s' % temp_file.name]
if version:
archive_cmd.append(version)
else:
archive_cmd.append('HEAD')
if archive_cmd is not None:
display.vvv('archiving %s' % archive_cmd)
run_scm_cmd(archive_cmd, os.path.join(tempdir, name))
return temp_file.name
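# Usage sketch (illustrative values): check out a git role at a tag and
# archive it into a temporary tarball whose path is returned.
#   tarball = RoleRequirement.scm_archive_role(
#       'https://git.example.com/acme/nginx-role.git',
#       scm='git', name='nginx-role', version='v1.2')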
|
alexlo03/ansible
|
lib/ansible/playbook/role/requirement.py
|
Python
|
gpl-3.0
| 6,833
|
[
"Galaxy"
] |
a8390b4cc191019042444c87c2ef697cf7b7f819a6f3d4a665a45b3e41ceb02b
|
"""
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from __future__ import division, absolute_import, print_function
from numpy.lib import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<type 'numpy.flatiter'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> fl.next()
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> fl.next()
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* "buffered" enables buffering when required.
* "c_index" causes a C-order index to be tracked.
* "f_index" causes a Fortran-order index to be tracked.
* "multi_index" causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* "common_dtype" causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* "delay_bufalloc" delays allocation of the buffers until
a reset() call is made. Allows "allocate" operands to
be initialized before their values are copied into the buffers.
* "external_loop" causes the `values` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* "grow_inner" allows the `value` array sizes to be made
larger than the buffer size when both "buffered" and
"external_loop" is used.
* "ranged" allows the iterator to be restricted to a sub-range
of the iterindex values.
* "refs_ok" enables iteration of reference types, such as
object arrays.
* "reduce_ok" enables iteration of "readwrite" operands
which are broadcasted, also known as reduction operands.
* "zerosize_ok" allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
"readonly", "readwrite", or "writeonly" must be specified.
* "readonly" indicates the operand will only be read from.
* "readwrite" indicates the operand will be read from and written to.
* "writeonly" indicates the operand will only be written to.
* "no_broadcast" prevents the operand from being broadcasted.
* "contig" forces the operand data to be contiguous.
* "aligned" forces the operand data to be aligned.
* "nbo" forces the operand data to be in native byte order.
* "copy" allows a temporary read-only copy if required.
* "updateifcopy" allows a temporary read-write copy if required.
* "allocate" causes the array to be allocated if it is None
in the `op` parameter.
* "no_subtype" prevents an "allocate" operand from using a subtype.
* "arraymask" indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* 'writemasked' indicates that only elements where the chosen
'arraymask' operand is True will be written to.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of "allocate" operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
        If provided, is a list of ints or None for each operand.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as "newaxis".
itershape : tuple of ints, optional
The desired shape of the iterator. This allows "allocate" operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the "delay_bufalloc" flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the "c_index" or
the "f_index" flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the "multi_index" flag,
and the property `multi_index` can be used to retrieve it.
index
When the "c_index" or "f_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_index` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern.
multi_index
When the "multi_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
        and `has_multi_index` is False.
ndim : int
The iterator's dimension.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
Value of `operands` at current iteration. Normally, this is a
tuple of array scalars, but if the flag "external_loop" is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the NumPy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the iterator's coordinates or index, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol::
def iter_add_py(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
for (a, b, c) in it:
addop(a, b, out=c)
return it.operands[2]
Here is the same function, but following the C-style pattern::
def iter_add(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
while not it.finished:
addop(it[0], it[1], out=it[2])
it.iternext()
return it.operands[2]
Here is an example outer product function::
def outer_it(x, y, out=None):
mulop = np.multiply
it = np.nditer([x, y, out], ['external_loop'],
[['readonly'], ['readonly'], ['writeonly', 'allocate']],
op_axes=[range(x.ndim)+[-1]*y.ndim,
[-1]*x.ndim+range(y.ndim),
None])
for (a, b, c) in it:
mulop(a, b, out=c)
return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc::
def luf(lamdaexpr, *args, **kwargs):
"luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)"
nargs = len(args)
op = (kwargs.get('out',None),) + args
it = np.nditer(op, ['buffered','external_loop'],
[['writeonly','allocate','no_broadcast']] +
[['readonly','nbo','aligned']]*nargs,
order=kwargs.get('order','K'),
casting=kwargs.get('casting','safe'),
buffersize=kwargs.get('buffersize',0))
while not it.finished:
it[0] = lamdaexpr(*it[1:])
it.iternext()
return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> it.next()
(array(0), array(1))
>>> it2 = it.copy()
>>> it2.next()
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
    without returning the result. Used in the C-style do-while
pattern. For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
See Also
--------
broadcast_arrays
broadcast_to
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[ 5., 6., 7.],
[ 6., 7., 8.],
[ 7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> row.next(), col.next()
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('ndim',
"""
Number of dimensions of broadcasted result. Alias for `nd`.
.. versionadded:: 1.12.0
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.ndim
2
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result. For code intended for NumPy
1.12.0 and later the more consistent `ndim` is preferred.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
    >>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
sequence. This argument can only be used to 'upcast' the array. For
downcasting, use the .astype(t) method.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
or if a copy is needed to satisfy any of the other requirements
(`dtype`, `order`, etc.).
order : {'K', 'A', 'C', 'F'}, optional
Specify the memory layout of the array. If object is not an array, the
newly created array will be in C order (row major) unless 'F' is
specified, in which case it will be in Fortran order (column major).
If object is an array the following holds.
===== ========= ===================================================
order no copy copy=True
===== ========= ===================================================
'K' unchanged F & C order preserved, otherwise most similar order
'A' unchanged F order if input is F and not C, otherwise C order
'C' C order C order
'F' F order F order
===== ========= ===================================================
When ``copy=False`` and a copy is made for other reasons, the result is
the same as if ``copy=True``, with some exceptions for `A`, see the
Notes section. The default order is 'K'.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty, empty_like, zeros, zeros_like, ones, ones_like, full, full_like
Notes
-----
When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
and a copy is forced by a change in dtype, then the order of the result is
not necessarily 'C' as expected. This is likely a bug.
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""")
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C')
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array
dtype : data-type, optional
Desired output data-type.
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and
order. Object arrays will be initialized to None.
See Also
--------
empty_like, zeros, ones
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #random
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #random
""")
add_newdoc('numpy.core.multiarray', 'empty_like',
"""
empty_like(a, dtype=None, order='K', subok=True)
Return a new array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of the
returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], #random
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
""")
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C')
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=np.int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""")
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='')
A new 1-D array initialized from raw binary or text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
If not provided or, equivalently, the empty string, the data will
be interpreted as binary data; otherwise, as ASCII text with
decimal numbers. Also in this latter case, this argument is
interpreted as the string separating numbers in the data; extra
whitespace between elements is also ignored.
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
>>> np.fromstring('\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, np.float)
array([ 0., 1., 4., 9., 16.])
""")
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='')
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str
Open file object or filename.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
    data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import os
>>> fname = os.tmpnam()
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
""")
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset; default: 0.
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt)
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = 'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array(['w', 'o', 'r', 'l', 'd'],
dtype='|S1')
""")
add_newdoc('numpy.core.multiarray', 'concatenate',
"""
concatenate((a1, a2, ...), axis=0)
Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data = [0 1 2 2 3 4],
mask = False,
fill_value = 999999)
>>> np.ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
""")
add_newdoc('numpy.core', 'inner',
"""
inner(a, b)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimension of `a` and `b` has different size.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[ 7., 0.],
[ 0., 7.]])
""")
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range <http://docs.python.org/lib/built-in-funcs.html>`_ function,
but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use ``linspace`` for these cases.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified, `start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
linspace : Evenly spaced numbers with careful handling of endpoints.
ogrid: Arrays of evenly spaced numbers in N-dimensions.
mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""")
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NDARRAY_VERSION number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'where',
"""
where(condition, [x, y])
Return elements, either from `x` or `y`, depending on `condition`.
If only `condition` is given, return ``condition.nonzero()``.
Parameters
----------
condition : array_like, bool
When True, yield `x`, otherwise yield `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same
shape as `condition`.
Returns
-------
out : ndarray or tuple of ndarrays
If both `x` and `y` are specified, the output array contains
elements of `x` where `condition` is True, and elements from
`y` elsewhere.
If only `condition` is given, return the tuple
``condition.nonzero()``, the indices where `condition` is True.
See Also
--------
nonzero, choose
Notes
-----
If `x` and `y` are given and input arrays are 1-D, `where` is
equivalent to::
[xv if c else yv for (c,xv,yv) in zip(condition,x,y)]
Examples
--------
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
>>> np.where([[0, 1], [1, 0]])
(array([0, 1]), array([1, 0]))
>>> x = np.arange(9.).reshape(3, 3)
>>> np.where( x > 5 )
(array([2, 2, 2]), array([0, 1, 2]))
>>> x[np.where( x > 3.0 )] # Note: result is 1D.
array([ 4., 5., 6., 7., 8.])
>>> np.where(x < 5, x, -1) # Note: broadcasting.
array([[ 0., 1., 2.],
[ 3., 4., -1.],
[-1., -1., -1.]])
Find the indices of elements of `x` that are in `goodvalues`.
>>> goodvalues = [3, 4, 7]
>>> ix = np.in1d(x.ravel(), goodvalues).reshape(x.shape)
>>> ix
array([[False, False, False],
[ True, True, False],
[False, True, False]], dtype=bool)
>>> np.where(ix)
(array([1, 1, 2]), array([0, 1, 1]))
""")
add_newdoc('numpy.core.multiarray', 'lexsort',
"""
lexsort(keys, axis=-1)
Perform an indirect sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
    for the keys argument, its rows are interpreted as the sorting keys and
sorting is according to the last row, second last row etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> print(ind)
[2 0 4 6 5 3 1]
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
""")
add_newdoc('numpy.core.multiarray', 'can_cast',
"""
can_cast(from, totype, casting = 'safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
True if the scalar value can be cast without overflow or truncation
to an integer.
Parameters
----------
from : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
totype : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
out : bool
True if cast can occur according to the casting rule.
Notes
-----
Starting in NumPy 1.9, can_cast function now returns False in 'safe'
casting mode for integer/float dtype and string dtype if the string dtype
length is not long enough to store the max integer/float value converted
to a string. Previously can_cast in 'safe' mode returned True for
integer/float dtype and a string dtype of any length.
See also
--------
dtype, result_type
Examples
--------
Basic examples
>>> np.can_cast(np.int32, np.int64)
True
>>> np.can_cast(np.float64, np.complex)
True
>>> np.can_cast(np.complex, np.float)
False
>>> np.can_cast('i8', 'f8')
True
>>> np.can_cast('i8', 'f4')
False
>>> np.can_cast('i4', 'S4')
False
Casting scalars
>>> np.can_cast(100, 'i1')
True
>>> np.can_cast(150, 'i1')
False
>>> np.can_cast(150, 'u1')
True
>>> np.can_cast(3.5e100, np.float32)
False
>>> np.can_cast(1000.0, np.float32)
True
Array scalar checks the value, array does not
>>> np.can_cast(np.array(1000.0), np.float32)
True
>>> np.can_cast(np.array([1000.0]), np.float32)
False
Using the casting rules
>>> np.can_cast('i8', 'i8', 'no')
True
>>> np.can_cast('<i8', '>i8', 'no')
False
>>> np.can_cast('<i8', '>i8', 'equiv')
True
>>> np.can_cast('<i4', '>i8', 'equiv')
False
>>> np.can_cast('<i4', '>i8', 'safe')
True
>>> np.can_cast('<i8', '>i4', 'safe')
False
>>> np.can_cast('<i8', '>i4', 'same_kind')
True
>>> np.can_cast('<i8', '>u4', 'same_kind')
False
>>> np.can_cast('<i8', '>u4', 'unsafe')
True
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric and associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
Starting in NumPy 1.9, promote_types function now returns a valid string
length when given an integer or float dtype as one argument and a string
dtype as another argument. Previously it always returned the input string
dtype, even if it wasn't long enough to store the max integer/float value
converted to a string.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
""")
add_newdoc('numpy.core.multiarray', 'min_scalar_type',
"""
min_scalar_type(a)
For scalar ``a``, returns the data type with the smallest size
and smallest scalar kind which can hold its value. For non-scalar
array ``a``, returns the vector's dtype unmodified.
Floating point values are not demoted to integers,
and complex values are not demoted to floats.
Parameters
----------
a : scalar or array_like
The value whose minimal data type is to be found.
Returns
-------
out : dtype
The minimal data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, promote_types, dtype, can_cast
Examples
--------
>>> np.min_scalar_type(10)
dtype('uint8')
>>> np.min_scalar_type(-260)
dtype('int16')
>>> np.min_scalar_type(3.1)
dtype('float16')
>>> np.min_scalar_type(1e50)
dtype('float64')
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'result_type',
"""
result_type(*arrays_and_dtypes)
Returns the type that results from applying the NumPy
type promotion rules to the arguments.
Type promotion in NumPy works similarly to the rules in languages
like C++, with some slight differences. When both scalars and
arrays are used, the array's type takes precedence and the actual value
of the scalar is taken into account.
For example, calculating 3*a, where a is an array of 32-bit floats,
intuitively should result in a 32-bit float output. If the 3 is a
32-bit integer, the NumPy rules indicate it can't convert losslessly
into a 32-bit float, so a 64-bit float should be the result type.
By examining the value of the constant, '3', we see that it fits in
an 8-bit integer, which can be cast losslessly into the 32-bit float.
Parameters
----------
arrays_and_dtypes : list of arrays and dtypes
The operands of some operation whose result type is needed.
Returns
-------
out : dtype
The result type.
See also
--------
dtype, promote_types, min_scalar_type, can_cast
Notes
-----
.. versionadded:: 1.6.0
The specific algorithm used is as follows.
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
kind of all the arrays and the scalars are.
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :func:`promote_types`
to produce the return value.
Otherwise, `min_scalar_type` is called on each array, and
the resulting data types are all combined with :func:`promote_types`
to produce the return value.
The set of int values is not a subset of the uint values for types
with the same number of bits, something not reflected in
:func:`min_scalar_type`, but handled as a special case in `result_type`.
Examples
--------
>>> np.result_type(3, np.arange(7, dtype='i1'))
dtype('int8')
>>> np.result_type('i4', 'c8')
dtype('complex128')
>>> np.result_type(3.0, -2)
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'newbuffer',
"""
newbuffer(size)
Return a new uninitialized buffer object.
Parameters
----------
size : int
Size in bytes of returned buffer object.
Returns
-------
newbuffer : buffer object
Returned, uninitialized buffer object of `size` bytes.
""")
add_newdoc('numpy.core.multiarray', 'getbuffer',
"""
getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset.
Default is the entire buffer. A read-write buffer is attempted followed
by a read-only buffer.
Parameters
----------
obj : object
offset : int, optional
size : int, optional
Returns
-------
buffer_obj : buffer
Examples
--------
>>> buf = np.getbuffer(np.ones(5), 1, 3)
>>> len(buf)
3
>>> buf[0]
'\\x00'
>>> buf
<read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
""")
add_newdoc('numpy.core', 'dot',
"""
dot(a, b, out=None)
Dot product of two arrays.
For 2-D arrays it is equivalent to matrix multiplication, and for 1-D
arrays to inner product of vectors (without complex conjugation). For
N dimensions it is a sum product over the last axis of `a` and
the second-to-last of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
matmul : '@' operator as method with out parameter.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
""")
add_newdoc('numpy.core', 'matmul',
"""
matmul(a, b, out=None)
Matrix product of two arrays.
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional
matrices.
- If either argument is N-D, N > 2, it is treated as a stack of
matrices residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by
prepending a 1 to its dimensions. After matrix multiplication
the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by
appending a 1 to its dimensions. After matrix multiplication
the appended 1 is removed.
Multiplication by a scalar is not allowed, use ``*`` instead. Note that
multiplying a stack of matrices with a vector will result in a stack of
vectors, but matmul will not recognize it as such.
``matmul`` differs from ``dot`` in two important ways.
- Multiplication by scalars is not allowed.
- Stacks of matrices are broadcast together as if the matrices
were elements.
.. warning::
This function is preliminary and included in NumPy 1.10.0 for testing
and documentation. Its semantics will not change, but the number and
order of the optional arguments will.
.. versionadded:: 1.10.0
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
1-D arrays then a scalar is returned; otherwise an array is
returned. If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
If a scalar value is passed.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
dot : alternative matrix product with different broadcasting rules.
Notes
-----
The matmul function implements the semantics of the `@` operator introduced
in Python 3.5 following PEP465.
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.matmul(a, b)
array([[4, 1],
[2, 2]])
For 2-D mixed with 1-D, the result is the usual matrix-vector product:
>>> a = [[1, 0], [0, 1]]
>>> b = [1, 2]
>>> np.matmul(a, b)
array([1, 2])
>>> np.matmul(b, a)
array([1, 2])
Broadcasting is conventional for stacks of arrays:
>>> a = np.arange(2*2*4).reshape((2,2,4))
>>> b = np.arange(2*2*4).reshape((2,4,2))
>>> np.matmul(a,b).shape
(2, 2, 2)
>>> np.matmul(a,b)[0,1,1]
98
>>> sum(a[0,1,:] * b[0,:,1])
98
Vector, vector returns the scalar inner product, but neither argument
is complex-conjugated:
>>> np.matmul([2j, 3j], [2j, 3j])
(-13+0j)
Scalar multiplication raises an error.
>>> np.matmul([1,2], 3)
Traceback (most recent call last):
...
ValueError: Scalar operands are not allowed, use '*' instead
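As a further sketch of the broadcasting difference from ``dot`` (the
shapes here are chosen arbitrarily for illustration):
>>> a = np.ones((9, 5, 7, 4))
>>> b = np.ones((9, 5, 4, 3))
>>> np.matmul(a, b).shape
(9, 5, 7, 3)
>>> np.dot(a, b).shape
(9, 5, 7, 9, 5, 3)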
""")
add_newdoc('numpy.core', 'einsum',
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe')
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional
array operations can be represented in a simple fashion. This function
provides a way to compute such summations. The best way to understand this
function is to try the examples below, which show how many common NumPy
functions can be implemented as calls to `einsum`.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : data-type, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
dot, inner, outer, tensordot
Notes
-----
.. versionadded:: 1.6.0
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Repeated subscript labels in one operand take the diagonal. For example,
``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to ``np.inner(a,b)``. If a label appears only once,
it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
with no changes.
The order of labels in the output is by default alphabetical. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose.
The output can be controlled by specifying output subscript labels
as well. This specifies the label order, and allows summing to
be disallowed or forced when desired. The call ``np.einsum('i->', a)``
is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
is like ``np.diag(a)``. The difference is that `einsum` does not
allow broadcasting by default.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, you can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
An alternative way to provide the subscripts and operands is as
``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
below have corresponding `einsum` calls with the two parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as ``np.swapaxes(a, 0, 2)`` and
``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> c.T
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum('i...->...', a)
array([50, 55, 60, 65, 70])
>>> np.einsum(a, [0,Ellipsis], [Ellipsis])
array([50, 55, 60, 65, 70])
>>> np.sum(a, axis=0)
array([50, 55, 60, 65, 70])
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> # since version 1.10.0
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
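An ordinary matrix product, for comparison with the calls above (the
arrays here are illustrative):
>>> A = np.arange(6).reshape(2,3)
>>> B = np.arange(12).reshape(3,4)
>>> np.einsum('ij,jk->ik', A, B)
array([[20, 23, 26, 29],
       [56, 68, 80, 92]])
>>> np.dot(A, B)
array([[20, 23, 26, 29],
       [56, 68, 80, 92]])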
""")
add_newdoc('numpy.core', 'vdot',
"""
vdot(a, b)
Return the dot product of two vectors.
The vdot(`a`, `b`) function handles complex numbers differently than
dot(`a`, `b`). If the first argument is complex the complex conjugate
of the first argument is used for the calculation of the dot product.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : array_like
If `a` is complex the complex conjugate is taken before calculation
of the dot product.
b : array_like
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`. Can be an int, float, or
complex depending on the types of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
>>> a = np.array([1+2j,3+4j])
>>> b = np.array([5+6j,7+8j])
>>> np.vdot(a, b)
(70-8j)
>>> np.vdot(b, a)
(70+8j)
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
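Because `vdot` flattens its inputs, the result above can be reproduced
explicitly (a sketch using the same `a` and `b`):
>>> np.dot(a.ravel(), b.ravel())
30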
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
assignment examples; TODO).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[ -1.13698227e+002, 4.25087011e-303],
[ 2.88528414e-306, 3.27025015e-309]]) #random
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
"""Allow the array to be interpreted as a ctypes object by returning the
data-memory location as an integer
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
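A view of a view reports the array that actually owns the memory as its
base (a sketch, assuming base chains are collapsed as in NumPy >= 1.7):
>>> z = y[1:]
>>> z.base is x
True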
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
* data: A pointer to the memory area of the array as a Python integer.
This memory area may contain data that is not aligned, or not in correct
byte-order. The memory area may not even be writeable. The array
flags and data-type of this array should be respected when passing this
attribute to arbitrary C-code to avoid trouble that can include Python
crashing. User Beware! The value of this attribute is exactly the same
as ``self.__array_interface__['data'][0]``.
* shape (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to dtype('p') on this
platform. This base-type could be c_int, c_long, or c_longlong
depending on the platform. The c_intp type is defined accordingly in
numpy.ctypeslib. The ctypes array contains the shape of the underlying
array.
* strides (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array.
* data_as(obj): Return the data pointer cast to a particular c-types object.
For example, calling self._as_parameter_ is equivalent to
self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a
pointer to a ctypes array of floating-point data:
self.data_as(ctypes.POINTER(ctypes.c_double)).
* shape_as(obj): Return the shape tuple as an array of some other c-types
type. For example: self.shape_as(ctypes.c_short).
* strides_as(obj): Return the strides tuple as an array of some other
c-types type. For example: self.strides_as(ctypes.c_longlong).
Be careful using the ctypes attribute - especially on temporary
arrays or arrays constructed on the fly. For example, calling
``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory
that is invalid because the array created as (a+b) is deallocated
before the next Python statement. You can avoid this problem using
either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will
hold a reference to the array until ct is deleted or re-assigned.
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the ``_as_parameter_`` attribute, which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x
array([[0, 1],
[2, 3]])
>>> x.ctypes.data
30439712
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
<ctypes.LP_c_long object at 0x01F01300>
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
c_long(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
c_longlong(4294967296L)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
>>> x.ctypes.shape_as(ctypes.c_long)
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides_as(ctypes.c_longlong)
<numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x
array([[0, 1],
[2, 3]])
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<type 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
UPDATEIFCOPY (U)
This array is a copy of some other array. When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by
the user, via direct assignment to the attribute or dictionary entry,
or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays, or that ``self.strides[0] == self.itemsize``
for Fortran-style contiguous arrays.
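Examples
--------
A short sketch of both access styles described in the Notes:
>>> a = np.arange(4)
>>> a.flags['C_CONTIGUOUS']
True
>>> a.flags.writeable
True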
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<type 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
Notes
-----
May be used to "reshape" the array, as long as this would not
require a change in the total number of elements.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equivalent to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
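The claim above is easy to check directly (a minimal sketch):
>>> x = np.array([[0, 1, 2, 3, 4],
...               [5, 6, 7, 8, 9]], dtype=np.int32)
>>> x.strides
(20, 4)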
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
Same as self.transpose(), except that self is returned if
self.ndim < 2.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__([order])
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A'}, optional
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has fortran order.
If order is 'Any' (None) then the result has fortran order
only if the array already is in fortran order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__() -> Deep copy of array.
Used if copy.deepcopy is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(version, shape, dtype, isfortran, rawdata)
For unpickling.
Parameters
----------
version : int
optional pickle version. If omitted defaults to 0.
shape : tuple
dtype : data-type
isfortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None, keepdims=False)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None, keepdims=False)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis of `a`.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind='quicksort', order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
Starting in NumPy 1.9, astype method now returns an error if the string
dtype to cast to is not long enough in 'safe' casting mode to hold the max
value of integer/float array that is being casted. Previously the casting
was allowed even if the result was truncated.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([ 1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
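When `copy` is False and the dtype, order, and subok requirements are
already satisfied, the input array itself is returned (a sketch):
>>> y = np.array([1., 2.])
>>> y.astype(np.float64, copy=False) is y
True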
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements.
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> map(hex, A)
['0x1', '0x100', '0x2233']
>>> A.byteswap(True)
array([ 256, 1, 13090], dtype=int16)
>>> map(hex, A)
['0x100', '0x1', '0x3322']
Arrays of strings are not swapped
>>> A = np.array(['ceg', 'fac'])
>>> A.byteswap()
array(['ceg', 'fac'],
dtype='|S3')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(min=None, max=None, out=None)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:numpy.copy are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
a future version the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[ 2., 2.],
[ 2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[ 8., 8.],
[ 8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str
A string naming the dump file.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
Parameters
----------
None
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([ 1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[ 1.+1.j, 0.+0.j],
[ 0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[ 1., 0.],
[ 0., 2.]])
By choosing an offset of 8 bytes we can select the complex part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[ 1., 0.],
[ 0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.item(3)
2
>>> x.item(7)
5
>>> x.item((0, 1))
1
>>> x.item((2, 2))
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument, and the last argument is defined
as *item*. Then, ``a.itemset(*args)`` is equivalent to, but faster
than, ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[3, 1, 7],
[2, 0, 3],
[8, 5, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None, keepdims=False)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None, keepdims=False)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'shares_memory',
"""
shares_memory(a, b, max_work=None)
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem (maximum number
of candidate solutions to consider). The following special
values are recognized:
max_work=MAY_SHARE_EXACT (default)
The problem is solved exactly. In this case, the function returns
True only if there is an element shared between the arrays.
max_work=MAY_SHARE_BOUNDS
Only the memory bounds of a and b are checked.
Raises
------
numpy.TooHardError
Exceeded max_work.
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
False
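Unlike the bounds check done by `may_share_memory`, the exact test
distinguishes overlapping memory bounds from actually shared elements
(a sketch):
>>> x = np.zeros([3, 4])
>>> np.shares_memory(x[:,0], x[:,1])
False
>>> np.shares_memory(x[:,0], x[0,:])
True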
""")
add_newdoc('numpy.core.multiarray', 'may_share_memory',
"""
may_share_memory(a, b, max_work=None)
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem. See
`shares_memory` for details. Default for ``may_share_memory``
is to do a bounds check.
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
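Examples
--------
A minimal sketch: the underlying data bytes are unchanged, only their
interpretation differs:
>>> A = np.array([1, 256], dtype='<i2')
>>> B = A.newbyteorder()
>>> B.tolist()
[256, 1]
>>> B.newbyteorder().tolist()
[1, 256]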
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None, keepdims=False)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'copyto',
"""
copyto(dst, src, casting='same_kind', where=None)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
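Examples
--------
A minimal sketch of masked copying with `where`:
>>> dst = np.zeros(4, dtype=int)
>>> np.copyto(dst, [1, 2, 3, 4], where=[True, False, True, False])
>>> dst
array([1, 0, 3, 0])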
""")
add_newdoc('numpy.core.multiarray', 'putmask',
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : array_like
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data or references or views to it exist,
and the data memory must be changed.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that has been referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]])
>>> x
array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323],
[ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323],
[ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, and UPDATEIFCOPY, respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The UPDATEIFCOPY flag can never be set to True. The flag WRITEABLE
can only be set to True if the array owns its own memory, or the
ultimate owner of the memory exposes a writeable buffer interface,
or is a string. (The exception for string is made so that unpickling
can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 6 Boolean flags
in use, only three of which can be changed by the user:
UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) this array is a copy of some other array (referenced
by .base). When this array is deallocated, the base array will be
updated with the contents of this array.
All flags can be accessed using their first (upper case) letter as well
as the full name.
Examples
--------
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set UPDATEIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind='quicksort', order=None)
Sort an array, in-place.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of an array.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in sorted array.
partition: Partial sort.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([('c', 1), ('a', 2)],
dtype=[('x', '|S1'), ('y', '<i4')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that the value of the
element in the kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove single-dimensional entries from the shape of `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None, keepdims=False)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str
An open file object, or a string containing a filename.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
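Examples
--------
A minimal binary round-trip sketch (the temporary file path below is
created on the fly and is purely illustrative):
>>> import os, tempfile
>>> a = np.arange(4, dtype=np.int32)
>>> fname = os.path.join(tempfile.mkdtemp(), 'a.bin')
>>> a.tofile(fname)
>>> np.array_equal(np.fromfile(fname, dtype=np.int32), a)
True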
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as a (possibly nested) list.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible Python type.
Parameters
----------
none
Returns
-------
y : list
The possibly nested list of array elements.
Notes
-----
The array may be recreated via ``a = np.array(a.tolist())``.
Examples
--------
>>> a = np.array([1, 2])
>>> a.tolist()
[1, 2]
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
"""))
tobytesdoc = """
a.{name}(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object can be produced in 'C', 'Fortran', or 'Any'
order (the default is 'C' order). 'Any' order means C-order
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
{deprecated}
Parameters
----------
order : {{'C', 'F', None}}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]])
>>> x.tobytes()
b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
"""
add_newdoc('numpy.core.multiarray', 'ndarray',
('tostring', tobytesdoc.format(name='tostring',
deprecated=
'This function is a compatibility '
'alias for tobytes. Despite its '
'name it returns bytes not '
'strings.')))
add_newdoc('numpy.core.multiarray', 'ndarray',
('tobytes', tobytesdoc.format(name='tobytes',
deprecated='.. versionadded:: 1.9.0')))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array, this has no effect. (To change between column and
row vectors, first cast the 1-D array into a matrix object.)
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view(dtype=None, type=None)
New view of array with the same data.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16. The
default, None, results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print(type(y))
<class 'numpy.matrixlib.defmatrix.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([ 2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> print(x)
[(1, 20) (3, 4)]
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
array([1], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: new type not compatible with array.
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout)
Takes an arbitrary Python function and returns a NumPy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
Returns
-------
out : ufunc
Returns a NumPy universal function (``ufunc``) object.
See Also
--------
vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array(['0o12', '0o36', '0o144'], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['0o12', '0o36', '0o144'], dtype='<U5')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[10000, 0, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[20000, 2, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[10000, 0, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# compiled_base functions
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'digitize',
"""
digitize(x, bins, right=False)
Return the indices of the bins to which each value in input array belongs.
Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` if
`bins` is monotonically increasing, or ``bins[i-1] > x >= bins[i]`` if
`bins` is monotonically decreasing. If values in `x` are beyond the
bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate. If right
is True, then the right bin is closed so that the index ``i`` is such
that ``bins[i-1] < x <= bins[i]`` or ``bins[i-1] >= x > bins[i]`` if `bins`
is monotonically increasing or decreasing, respectively.
Parameters
----------
x : array_like
Input array to be binned. Prior to NumPy 1.10.0, this array had to
be 1-dimensional, but can now have any shape.
bins : array_like
Array of bins. It has to be 1-dimensional and monotonic.
right : bool, optional
Indicating whether the intervals include the right or the left bin
edge. Default behavior is (right==False) indicating that the interval
does not include the right edge. The left bin edge is closed in this
case, i.e., ``bins[i-1] <= x < bins[i]`` is the default behavior for
monotonically increasing bins.
Returns
-------
out : ndarray of ints
Output array of indices, of same shape as `x`.
Raises
------
ValueError
If `bins` is not monotonic.
TypeError
If the type of the input is complex.
See Also
--------
bincount, histogram, unique
Notes
-----
If values in `x` are such that they fall outside the bin range,
attempting to index `bins` with the indices that `digitize` returns
will result in an IndexError.
.. versionadded:: 1.10.0
`np.digitize` is implemented in terms of `np.searchsorted`. This means
that a binary search is used to bin the values, which scales much better
for larger number of bins than the previous linear search. It also removes
the requirement for the input array to be 1-dimensional.
Examples
--------
>>> x = np.array([0.2, 6.4, 3.0, 1.6])
>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
>>> inds = np.digitize(x, bins)
>>> inds
array([1, 4, 3, 2])
>>> for n in range(x.size):
... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
...
0.0 <= 0.2 < 1.0
4.0 <= 6.4 < 10.0
2.5 <= 3.0 < 4.0
1.0 <= 1.6 < 2.5
>>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
>>> bins = np.array([0, 5, 10, 15, 20])
>>> np.digitize(x,bins,right=True)
array([1, 2, 3, 4, 4])
>>> np.digitize(x,bins,right=False)
array([1, 3, 3, 4, 5])
""")
add_newdoc('numpy.core.multiarray', 'bincount',
"""
bincount(x, weights=None, minlength=None)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
A minimum number of bins for the output array.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is non-positive.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=np.float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
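Using ``minlength`` guarantees at least that many bins; positions with
no occurrences are zero-filled:
>>> np.bincount(np.array([0, 1, 1]), minlength=5)
array([1, 2, 0, 0, 0])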
""")
add_newdoc('numpy.core.multiarray', 'ravel_multi_index',
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as
indexing in row-major (C-style) or column-major
(Fortran-style) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
""")
add_newdoc('numpy.core.multiarray', 'unravel_index',
"""
unravel_index(indices, dims, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``dims``. Before version 1.6.0,
this function accepted just one index value.
dims : tuple of ints
The shape of the array to use for unraveling ``indices``.
order : {'C', 'F'}, optional
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Fortran-style) order.
.. versionadded:: 1.6.0
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
""")
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring, a RuntimeError is raised.
If this routine does not know how to add a docstring to the object,
a TypeError is raised.
""")
add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
"""
add_ufunc_docstring(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However, this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.core.multiarray', 'packbits',
"""
packbits(myarray, axis=None)
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
myarray : array_like
An integer type array whose elements should be packed to bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],[64]],[[192],[32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
""")
add_newdoc('numpy.core.multiarray', 'unpackbits',
"""
unpackbits(myarray, axis=None)
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `myarray` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is either
1-D (if `axis` is None) or the same shape as the input array with unpacking
done along the axis specified.
Parameters
----------
myarray : ndarray, uint8 type
Input array.
axis : int, optional
Unpacks along this axis.
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in a uint8
array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
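Packing the result back recovers the original array (a round-trip
sketch using the ``b`` from above):
>>> np.packbits(b, axis=1)
array([[ 2],
       [ 7],
       [23]], dtype=uint8)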
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use np.info(). For
example, np.info(np.sin). Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the "ufuncs.rst"
file in the NumPy reference guide.
Unary ufuncs:
=============
op(X, out=None)
Apply op to X elementwise
Parameters
----------
X : array_like
Input array.
out : array_like
An array to store the output. Must be the same shape as `X`.
Returns
-------
r : array_like
`r` will have the same shape as `X`; if out is provided, `r`
will be equal to out.
Binary ufuncs:
==============
op(X, Y, out=None)
Apply `op` to `X` and `Y` elementwise. May "broadcast" to make
the shapes of `X` and `Y` congruent.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
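For example, a shape-(3, 1) array broadcasts against a length-4 array:
>>> np.add(np.arange(3).reshape(3, 1), np.arange(4)).shape
(3, 4)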
Parameters
----------
X : array_like
First input array.
Y : array_like
Second input array.
out : array_like
An array to store the output. Must be the same shape as the
output would have.
Returns
-------
r : array_like
The return value; if out is provided, `r` will be equal to out.
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print(np.exp.identity)
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(a, axis=0, dtype=None, out=None, keepdims=False)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
a : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is `None`, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided, a
freshly-allocated array is returned.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.7.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
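Reducing over multiple axes at once by passing a tuple of axes
(available since NumPy 1.7):
>>> np.add.reduce(X, (0, 1))
array([12, 16])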
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[ 1., 0.],
[ 0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[ 1., 0.],
[ 1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[ 1., 0.],
[ 1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[ 1., 1.],
[ 0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(a, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = a.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``a[indices[i]]``.
* if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
Parameters
----------
a : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
``ufunc.reduceat(a, indices)[::2]`` where `indices` is
``range(len(a) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
Don't be fooled by this attribute's name: `reduceat(a)` is not
necessarily smaller than `a`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[ 12., 15., 18., 21.],
[ 12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[ 2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
"""
outer(A, B)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty((len(A), len(B)))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None)
Performs an unbuffered in-place operation on operand 'a' for elements
specified by 'indices'. For the addition ufunc, this method is equivalent to
`a[indices] += b`, except that results are accumulated for elements that
are indexed more than once. For example, `a[[0,0]] += 1` will only
increment the first element once because of buffering, whereas
`add.at(a, [0,0], 1)` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> a
array([-1, -2, 3, 4])
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> a
array([2, 3, 5, 4])
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> a
array([2, 4, 3, 4])
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(obj, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
obj
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Structured type, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Structured type, one field named 'f1', in itself containing a structured
type with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Structured type, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint), ('f2', np.int32)])
dtype([('f1', '<u4'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', '|S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
dtype([('hello', '<i4', 3), ('world', '|V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', '|S1'), ('age', '|u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', '|S25'), ('age', '|u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
Array-interface compliant full description of the data-type.
The format is that required by the 'descr' key in the
`__array_interface__` attribute.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the NumPy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
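For example:
>>> np.dtype('i4').itemsize
4
>>> np.dtype('S5').itemsize
5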
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
m timedelta
M datetime
O object
S (byte-)string
U Unicode
V void
= ======================
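For example:
>>> np.dtype('f8').kind
'f'
>>> np.dtype('S5').kind
'S'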
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
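A short illustration with a sub-array data type:
>>> dt = np.dtype((np.int32, (2,)))
>>> dt.subdtype
(dtype('int32'), (2,))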
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. The default value ('S') results in swapping the current
byte order. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The code does a case-insensitive check on the first letter of
`new_order` for these alternatives. For example, any of '>'
or 'B' or 'b' or 'brian' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional set of "holiday" dates that will always be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False], dtype='bool')
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
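>>> # A weekmask may also be given as 3-letter weekday abbreviations
... np.busdaycalendar(weekmask='Mon Tue Wed').weekmask.tolist()
[True, True, True, False, False, False, False]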
""")
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'is_busday',
"""
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
Calculates which of the given dates are valid days, and which are not.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True], dtype='bool')
""")
add_newdoc('numpy.core.multiarray', 'busday_offset',
"""
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
First adjusts the date to fall on a valid day according to
the ``roll`` rule, then applies offsets to the given dates
counted in valid days.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
offsets : array_like of int
The array of offsets, which is broadcast with ``dates``.
roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
How to treat dates that do not fall on a valid day. The default
is 'raise'.
* 'raise' means to raise an exception for an invalid day.
* 'nat' means to return a NaT (not-a-time) for an invalid day.
* 'forward' and 'following' mean to take the first valid day
later in time.
* 'backward' and 'preceding' mean to take the first valid day
earlier in time.
* 'modifiedfollowing' means to take the first valid day
later in time unless it is across a Month boundary, in which
case to take the first valid day earlier in time.
* 'modifiedpreceding' means to take the first valid day
earlier in time unless it is across a Month boundary, in which
case to take the first valid day later in time.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of datetime64[D], optional
If provided, this array is filled with the result.
Returns
-------
out : array of datetime64[D]
An array with a shape from broadcasting ``dates`` and ``offsets``
together, containing the dates with offsets applied.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # First business day in October 2011 (not accounting for holidays)
... np.busday_offset('2011-10', 0, roll='forward')
numpy.datetime64('2011-10-03','D')
>>> # Last business day in February 2012 (not accounting for holidays)
... np.busday_offset('2012-03', -1, roll='forward')
numpy.datetime64('2012-02-29','D')
>>> # Third Wednesday in January 2011
... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
numpy.datetime64('2011-01-19','D')
>>> # 2012 Mother's Day in Canada and the U.S.
... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
numpy.datetime64('2012-05-13','D')
>>> # First business day on or after a date
... np.busday_offset('2011-03-20', 0, roll='forward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 0, roll='forward')
numpy.datetime64('2011-03-22','D')
>>> # First business day after a date
... np.busday_offset('2011-03-20', 1, roll='backward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 1, roll='backward')
numpy.datetime64('2011-03-23','D')
""")
add_newdoc('numpy.core.multiarray', 'busday_count',
"""
busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
Counts the number of valid days between `begindates` and
`enddates`, not including the day of `enddates`.
If ``enddates`` specifies a date value that is earlier than the
corresponding ``begindates`` date value, the count will be negative.
.. versionadded:: 1.7.0
Parameters
----------
begindates : array_like of datetime64[D]
The array of the first dates for counting.
enddates : array_like of datetime64[D]
The array of the end dates for counting, which are excluded
from the count themselves.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of int, optional
If provided, this array is filled with the result.
Returns
-------
out : array of int
An array with a shape from broadcasting ``begindates`` and ``enddates``
together, containing the number of valid days between
the begin and end dates.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
Examples
--------
>>> # Number of weekdays in January 2011
... np.busday_count('2011-01', '2011-02')
21
>>> # Number of weekdays in 2011
... np.busday_count('2011', '2012')
260
>>> # Number of Saturdays in 2011
... np.busday_count('2011', '2012', weekmask='Sat')
53
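>>> # A sketch of the same count with one assumed holiday excluded
... np.busday_count('2011-01', '2011-02', holidays=['2011-01-17'])
20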
""")
##############################################################################
#
# nd_grid instances
#
##############################################################################
add_newdoc('numpy.lib.index_tricks', 'mgrid',
"""
`nd_grid` instance which returns a dense multi-dimensional "meshgrid".
An instance of `numpy.lib.index_tricks.nd_grid` which returns a dense
(or fleshed out) mesh-grid when indexed, so that each returned argument
has the same shape. The dimensions and number of the output arrays are
equal to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
-------
mesh-grid `ndarrays` all of the same dimensions
See Also
--------
numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
ogrid : like mgrid but returns open (not fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> np.mgrid[0:5,0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
>>> np.mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
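>>> # A sketch of the complex-step form in two dimensions: both
... # endpoints are included and one array is stacked per dimension
... np.mgrid[0:1:3j, 0:1:3j].shape
(2, 3, 3)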
""")
add_newdoc('numpy.lib.index_tricks', 'ogrid',
"""
`nd_grid` instance which returns an open multi-dimensional "meshgrid".
An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
(i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
of each returned array is greater than 1. The dimension and number of the
output arrays are equal to the number of indexing dimensions. If the step
length is not a complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
-------
mesh-grid `ndarrays` with only one dimension :math:`\\neq 1`
See Also
--------
np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> from numpy import ogrid
>>> ogrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> ogrid[0:5,0:5]
[array([[0],
[1],
[2],
[3],
[4]]), array([[0, 1, 2, 3, 4]])]
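>>> # A sketch of the typical use: the open grids broadcast against
... # each other when evaluating a function over the mesh
... x, y = ogrid[0:3, 0:3]
>>> (x + y).shape
(3, 3)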
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which most (all?) numpy scalar types are derived. For
consistency, it exposes the same API as `ndarray`, despite many of the
consequent attributes being either "get-only" or completely irrelevant.
Users wishing to define custom scalar types are strongly encouraged to
derive from this class.
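A minimal doctest sketch (assuming ``numpy`` is imported as ``np``):
>>> isinstance(np.float64(1.0), np.generic)
True
>>> isinstance(np.int32(1), np.generic)
True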
""")
# Attributes
add_newdoc('numpy.core.numerictypes', 'generic', ('T',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('base',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
add_newdoc('numpy.core.numerictypes', 'generic', ('all',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('any',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('item',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('max',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('min',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new `dtype` with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New `dtype` object with the given change to the byte order.
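Examples
--------
A sketch on an explicitly little-endian dtype (the reprs below assume
a little-endian platform, where '>i4' is non-native):
>>> dt = np.dtype('<i4')
>>> dt.newbyteorder('>')
dtype('>i4')
>>> dt.newbyteorder()  # the default 'S' swaps the current byte order
dtype('>i4')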
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('prod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ptp',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('put',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ravel',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('repeat',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('reshape',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('resize',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('round',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setflags',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('std',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('take',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tofile',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tolist',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tostring',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('trace',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('transpose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('var',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('view',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
##############################################################################
#
# Documentation for other scalar classes
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'bool_',
"""NumPy's Boolean type. Character code: ``?``. Alias: bool8""")
add_newdoc('numpy.core.numerictypes', 'complex64',
"""
Complex number type composed of two 32-bit floats. Character code: 'F'.
""")
add_newdoc('numpy.core.numerictypes', 'complex128',
"""
Complex number type composed of two 64-bit floats. Character code: 'D'.
Python complex compatible.
""")
add_newdoc('numpy.core.numerictypes', 'complex256',
"""
Complex number type composed of two 128-bit floats. Character code: 'G'.
""")
add_newdoc('numpy.core.numerictypes', 'float32',
"""
32-bit floating-point number. Character code 'f'. C float compatible.
""")
add_newdoc('numpy.core.numerictypes', 'float64',
"""
64-bit floating-point number. Character code 'd'. Python float compatible.
""")
add_newdoc('numpy.core.numerictypes', 'float96',
"""
""")
add_newdoc('numpy.core.numerictypes', 'float128',
"""
128-bit floating-point number. Character code: 'g'. C long float
compatible.
""")
add_newdoc('numpy.core.numerictypes', 'int8',
"""8-bit integer. Character code ``b``. C char compatible.""")
add_newdoc('numpy.core.numerictypes', 'int16',
"""16-bit integer. Character code ``h``. C short compatible.""")
add_newdoc('numpy.core.numerictypes', 'int32',
"""32-bit integer. Character code 'i'. C int compatible.""")
add_newdoc('numpy.core.numerictypes', 'int64',
"""64-bit integer. Character code 'l'. Python int compatible.""")
add_newdoc('numpy.core.numerictypes', 'object_',
"""Any Python object. Character code: 'O'.""")
| AustereCuriosity/numpy | numpy/add_newdocs.py | Python | bsd-3-clause | 224421 | ["Brian"] | c27b51ab94b2ed7e72ed1d21ef06bc154cdfac075e2d7fe719555e1594f85f83 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import gzip
import json
import os
import random
import unittest
from collections import OrderedDict
import numpy as np
import pandas as pd
try:
import ruamel.yaml as yaml
except ImportError:
try:
import ruamel_yaml as yaml # type: ignore # noqa
except ImportError:
import yaml # type: ignore # noqa
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Molecule, Structure
from pymatgen.io.lammps.data import (
CombinedData,
ForceField,
LammpsBox,
LammpsData,
Topology,
lattice_2_lmpbox,
structure_2_lmpdata,
)
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "lammps")
class LammpsBoxTest(PymatgenTest):
@classmethod
def setUpClass(cls):
cls.peptide = LammpsBox(
bounds=[
[36.840194, 64.211560],
[41.013691, 68.385058],
[29.768095, 57.139462],
]
)
cls.quartz = LammpsBox(
bounds=[[0, 4.913400], [0, 4.255129], [0, 5.405200]],
tilt=[-2.456700, 0.0, 0.0],
)
def test_volume(self):
obounds = np.array(self.peptide.bounds)
ov = np.prod(obounds[:, 1] - obounds[:, 0])
self.assertEqual(self.peptide.volume, ov)
self.assertAlmostEqual(self.quartz.volume, 113.00733165874873)
def test_get_string(self):
peptide = self.peptide.get_string(5)
peptide_5 = """36.84019 64.21156 xlo xhi
41.01369 68.38506 ylo yhi
29.76809 57.13946 zlo zhi"""
self.assertEqual(peptide, peptide_5)
quartz = self.quartz.get_string(4)
quartz_4 = """0.0000 4.9134 xlo xhi
0.0000 4.2551 ylo yhi
0.0000 5.4052 zlo zhi
-2.4567 0.0000 0.0000 xy xz yz"""
self.assertEqual(quartz, quartz_4)
def test_get_box_shift(self):
peptide = self.peptide
self.assertEqual(peptide.get_box_shift([1, 0, 0])[0], 64.211560 - 36.840194)
self.assertEqual(peptide.get_box_shift([0, 0, -1])[-1], 29.768095 - 57.139462)
quartz = self.quartz
np.testing.assert_array_almost_equal(quartz.get_box_shift([0, 0, 1]), [0, 0, 5.4052], 4)
np.testing.assert_array_almost_equal(quartz.get_box_shift([0, 1, -1]), [-2.4567, 4.2551, -5.4052], 4)
np.testing.assert_array_almost_equal(quartz.get_box_shift([1, -1, 0]), [4.9134 + 2.4567, -4.2551, 0], 4)
def test_to_lattice(self):
peptide = self.peptide.to_lattice()
np.testing.assert_array_almost_equal(peptide.abc, [27.371367] * 3)
self.assertTrue(peptide.is_orthogonal)
quartz = self.quartz.to_lattice()
np.testing.assert_array_almost_equal(
quartz.matrix,
[[4.913400, 0, 0], [-2.456700, 4.255129, 0], [0, 0, 5.405200]],
)
class LammpsDataTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.peptide = LammpsData.from_file(filename=os.path.join(test_dir, "data.peptide"))
cls.ethane = LammpsData.from_file(filename=os.path.join(test_dir, "ethane.data"))
cls.quartz = LammpsData.from_file(filename=os.path.join(test_dir, "data.quartz"), atom_style="atomic")
cls.virus = LammpsData.from_file(filename=os.path.join(test_dir, "virus.data"), atom_style="angle")
cls.tatb = LammpsData.from_file(
filename=os.path.join(test_dir, "tatb.data"),
atom_style="charge",
sort_id=True,
)
def test_structure(self):
quartz = self.quartz.structure
np.testing.assert_array_almost_equal(
quartz.lattice.matrix,
[[4.913400, 0, 0], [-2.456700, 4.255129, 0], [0, 0, 5.405200]],
)
self.assertEqual(quartz.formula, "Si3 O6")
self.assertNotIn("molecule-ID", self.quartz.atoms.columns)
ethane = self.ethane.structure
np.testing.assert_array_almost_equal(ethane.lattice.matrix, np.diag([10.0] * 3))
lbounds = np.array(self.ethane.box.bounds)[:, 0]
coords = self.ethane.atoms[["x", "y", "z"]].values - lbounds
np.testing.assert_array_almost_equal(ethane.cart_coords, coords)
np.testing.assert_array_almost_equal(ethane.site_properties["charge"], self.ethane.atoms["q"])
tatb = self.tatb.structure
frac_coords = tatb.frac_coords[381]
real_frac_coords = frac_coords - np.floor(frac_coords)
np.testing.assert_array_almost_equal(real_frac_coords, [0.01553397, 0.71487872, 0.14134139])
co = Structure.from_spacegroup(194, Lattice.hexagonal(2.50078, 4.03333), ["Co"], [[1 / 3, 2 / 3, 1 / 4]])
ld_co = LammpsData.from_structure(co)
self.assertEqual(ld_co.structure.composition.reduced_formula, "Co")
ni = Structure.from_spacegroup(225, Lattice.cubic(3.50804), ["Ni"], [[0, 0, 0]])
ld_ni = LammpsData.from_structure(ni)
self.assertEqual(ld_ni.structure.composition.reduced_formula, "Ni")
def test_sort_structure(self):
s = Structure(Lattice.cubic(4), ["S", "Fe"], [[0, 0, 0], [0.5, 0.5, 0.5]])
lmp = LammpsData.from_structure(s, is_sort=False)
lmp.write_file("test1.data")
lmp2 = LammpsData.from_file("test1.data", atom_style="charge")
# internally element:type will be {Fe: 1, S: 2},
# therefore without sorting the atom types in structure
# will be [2, 1], i.e., (S, Fe)
self.assertListEqual(lmp2.atoms["type"].values.tolist(), [2, 1])
# with sorting the atom types in structures will be [1, 2]
lmp = LammpsData.from_structure(s, is_sort=True)
lmp.write_file("test1.data")
lmp2 = LammpsData.from_file("test1.data", atom_style="charge")
self.assertListEqual(lmp2.atoms["type"].values.tolist(), [1, 2])
def test_get_string(self):
pep = self.peptide.get_string(distance=7, velocity=5, charge=4)
pep_lines = pep.split("\n")
pep_kws = [
"Masses",
"Pair Coeffs",
"Bond Coeffs",
"Angle Coeffs",
"Dihedral Coeffs",
"Improper Coeffs",
"Atoms",
"Velocities",
"Bonds",
"Angles",
"Dihedrals",
"Impropers",
]
kw_inds = {l: i for i, l in enumerate(pep_lines) if l in pep_kws}
# section sequence
self.assertListEqual([k for k in sorted(kw_inds, key=kw_inds.get)], pep_kws)
# header
pep_header = "\n".join(pep_lines[: kw_inds["Masses"]])
pep_header_7 = """Generated by pymatgen.io.lammps.data.LammpsData
2004 atoms
1365 bonds
786 angles
207 dihedrals
12 impropers
14 atom types
18 bond types
31 angle types
21 dihedral types
2 improper types
36.8401940 64.2115600 xlo xhi
41.0136910 68.3850580 ylo yhi
29.7680950 57.1394620 zlo zhi
"""
self.assertEqual(pep_header, pep_header_7)
# int vs float for coeffs
pep_dihedral_coeff = pep_lines[kw_inds["Dihedral Coeffs"] + 2]
self.assertEqual(pep_dihedral_coeff, "1 0.200 1 180 1.0")
# distance and charge
pep_atom = pep_lines[kw_inds["Atoms"] + 2]
self.assertEqual(
pep_atom,
"1 1 1 0.5100 43.9999300 " "58.5267800 36.7855000 0 0 0",
)
# velocity
pep_velo = pep_lines[kw_inds["Velocities"] + 2]
self.assertEqual(pep_velo, "1 -0.00067 -0.00282 0.00383")
# no floats in topology sections
pep_topos = "\n".join(pep_lines[kw_inds["Bonds"] :])
self.assertNotIn(".", pep_topos)
c2h6 = self.ethane.get_string(distance=5, charge=3)
c2h6_lines = c2h6.split("\n")
c2h6_kws = [
"Masses",
"Pair Coeffs",
"Bond Coeffs",
"Angle Coeffs",
"Dihedral Coeffs",
"Improper Coeffs",
"BondBond Coeffs",
"BondAngle Coeffs",
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs",
"AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs",
"AngleAngle Coeffs",
"Atoms",
"Bonds",
"Angles",
"Dihedrals",
"Impropers",
]
kw_inds = {l: i for i, l in enumerate(c2h6_lines) if l in c2h6_kws}
# section sequence
self.assertListEqual([k for k in sorted(kw_inds, key=kw_inds.get)], c2h6_kws)
# header
c2h6_header = "\n".join(c2h6_lines[: kw_inds["Masses"]])
c2h6_header_5 = """Generated by pymatgen.io.lammps.data.LammpsData
8 atoms
7 bonds
12 angles
9 dihedrals
8 impropers
2 atom types
2 bond types
2 angle types
1 dihedral types
2 improper types
0.21455 10.21454 xlo xhi
0.11418 10.11418 ylo yhi
-10.00014 -0.00015 zlo zhi
"""
self.assertEqual(c2h6_header, c2h6_header_5)
# distance and charge
c2h6_atom = c2h6_lines[kw_inds["Atoms"] + 2]
self.assertEqual(c2h6_atom, "1 1 1 -0.080 4.46291 5.14833 -5.00041 0 0 0")
# no floats in topology sections
c2h6_topos = "\n".join(c2h6_lines[kw_inds["Bonds"] :])
self.assertNotIn(".", c2h6_topos)
quartz = self.quartz.get_string(distance=4)
quartz_lines = quartz.split("\n")
quartz_kws = ["Masses", "Atoms"]
kw_inds = {l: i for i, l in enumerate(quartz_lines) if l in quartz_kws}
# header
quartz_header = "\n".join(quartz_lines[: kw_inds["Masses"]])
quartz_header_4 = """Generated by pymatgen.io.lammps.data.LammpsData
9 atoms
2 atom types
0.0000 4.9134 xlo xhi
0.0000 4.2551 ylo yhi
0.0000 5.4052 zlo zhi
-2.4567 0.0000 0.0000 xy xz yz
"""
self.assertEqual(quartz_header, quartz_header_4)
# distance
quartz_atom = quartz_lines[kw_inds["Atoms"] + 2]
self.assertEqual(quartz_atom, "1 1 2.3088 0.0000 3.6035")
virus = self.virus.get_string()
virus_lines = virus.split("\n")
pairij_coeff = virus_lines[virus_lines.index("PairIJ Coeffs") + 5]
self.assertEqual(pairij_coeff.strip().split(), ["1", "4", "1", "1.000", "1.12250"])
def test_write_file(self):
filename1 = "test1.data"
self.ethane.write_file(filename=filename1)
c2h6 = LammpsData.from_file(filename1)
pd.testing.assert_frame_equal(c2h6.masses, self.ethane.masses)
pd.testing.assert_frame_equal(c2h6.atoms, self.ethane.atoms)
ff_kw = random.sample(sorted(self.ethane.force_field.keys()), 1)[0]
pd.testing.assert_frame_equal(c2h6.force_field[ff_kw], self.ethane.force_field[ff_kw], ff_kw)
topo_kw = random.sample(sorted(self.ethane.topology.keys()), 1)[0]
pd.testing.assert_frame_equal(c2h6.topology[topo_kw], self.ethane.topology[topo_kw], topo_kw)
filename2 = "test2.data"
self.virus.write_file(filename=filename2)
v = LammpsData.from_file(filename2, atom_style="angle")
pd.testing.assert_frame_equal(v.force_field["PairIJ Coeffs"], self.virus.force_field["PairIJ Coeffs"])
def test_disassemble(self):
# general tests
c = LammpsData.from_file(os.path.join(test_dir, "crambin.data"))
_, c_ff, topos = c.disassemble()
mass_info = [
("N1", 14.0067),
("H1", 1.00797),
("C1", 12.01115),
("H2", 1.00797),
("C2", 12.01115),
("O1", 15.9994),
("C3", 12.01115),
("O2", 15.9994),
("H3", 1.00797),
("C4", 12.01115),
("N2", 14.0067),
("C5", 12.01115),
("S1", 32.064),
("C6", 12.01115),
("N3", 14.0067),
("C7", 12.01115),
("C8", 12.01115),
("C9", 12.01115),
("O3", 15.9994),
]
self.assertListEqual(c_ff.mass_info, mass_info)
np.testing.assert_array_equal(c_ff.nonbond_coeffs, c.force_field["Pair Coeffs"].values)
base_kws = ["Bond", "Angle", "Dihedral", "Improper"]
for kw in base_kws:
ff_kw = kw + " Coeffs"
i = random.randint(0, len(c_ff.topo_coeffs[ff_kw]) - 1)
sample_coeff = c_ff.topo_coeffs[ff_kw][i]
np.testing.assert_array_equal(sample_coeff["coeffs"], c.force_field[ff_kw].iloc[i].values, ff_kw)
topo = topos[-1]
atoms = c.atoms[c.atoms["molecule-ID"] == 46]
np.testing.assert_array_almost_equal(topo.sites.cart_coords, atoms[["x", "y", "z"]])
np.testing.assert_array_equal(topo.charges, atoms["q"])
atom_labels = [m[0] for m in mass_info]
self.assertListEqual(
topo.sites.site_properties["ff_map"],
[atom_labels[i - 1] for i in atoms["type"]],
)
shift = min(atoms.index)
for kw in base_kws:
ff_kw = kw + " Coeffs"
ff_coeffs = c_ff.topo_coeffs[ff_kw]
topo_kw = kw + "s"
topos_df = c.topology[topo_kw]
topo_df = topos_df[topos_df["atom1"] >= shift]
topo_arr = topo_df.drop("type", axis=1).values
np.testing.assert_array_equal(topo.topologies[topo_kw], topo_arr - shift, topo_kw)
sample_topo = random.sample(list(topo_df.itertuples(False, None)), 1)[0]
topo_type_idx = sample_topo[0] - 1
topo_type = tuple([atom_labels[i - 1] for i in atoms.loc[list(sample_topo[1:])]["type"]])
self.assertIn(topo_type, ff_coeffs[topo_type_idx]["types"], ff_kw)
# test no guessing element and pairij as nonbond coeffs
v = self.virus
_, v_ff, _ = v.disassemble(guess_element=False)
self.assertDictEqual(v_ff.maps["Atoms"], dict(Qa1=1, Qb1=2, Qc1=3, Qa2=4))
pairij_coeffs = v.force_field["PairIJ Coeffs"].drop(["id1", "id2"], axis=1)
np.testing.assert_array_equal(v_ff.nonbond_coeffs, pairij_coeffs.values)
# test class2 ff
_, e_ff, _ = self.ethane.disassemble()
e_topo_coeffs = e_ff.topo_coeffs
for k in ["BondBond Coeffs", "BondAngle Coeffs"]:
self.assertIn(k, e_topo_coeffs["Angle Coeffs"][0], k)
for k in [
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs",
"AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs",
]:
self.assertIn(k, e_topo_coeffs["Dihedral Coeffs"][0], k)
self.assertIn("AngleAngle Coeffs", e_topo_coeffs["Improper Coeffs"][0])
def test_from_file(self):
# general tests
pep = self.peptide
# header stats and Nos. of columns
self.assertEqual(pep.masses.shape, (14, 1))
self.assertEqual(pep.atoms.shape, (2004, 9))
self.assertListEqual(
list(pep.atoms.columns),
["molecule-ID", "type", "q", "x", "y", "z", "nx", "ny", "nz"],
)
topo = pep.topology
self.assertEqual(topo["Bonds"].shape, (1365, 3))
self.assertEqual(topo["Angles"].shape, (786, 4))
self.assertEqual(topo["Dihedrals"].shape, (207, 5))
self.assertEqual(topo["Impropers"].shape, (12, 5))
ff = pep.force_field
self.assertEqual(ff["Pair Coeffs"].shape, (14, 4))
self.assertEqual(ff["Bond Coeffs"].shape, (18, 2))
self.assertEqual(ff["Angle Coeffs"].shape, (31, 4))
self.assertEqual(ff["Dihedral Coeffs"].shape, (21, 4))
self.assertEqual(ff["Improper Coeffs"].shape, (2, 2))
# header box
np.testing.assert_array_equal(
pep.box.bounds,
[[36.840194, 64.211560], [41.013691, 68.385058], [29.768095, 57.139462]],
)
# body
self.assertEqual(pep.masses.at[7, "mass"], 12.0110)
self.assertEqual(ff["Pair Coeffs"].at[9, "coeff3"], 0.152100)
self.assertEqual(ff["Bond Coeffs"].at[5, "coeff2"], 1.430000)
self.assertEqual(ff["Angle Coeffs"].at[21, "coeff2"], 120.000000)
self.assertEqual(ff["Dihedral Coeffs"].at[10, "coeff1"], 0.040000)
self.assertEqual(ff["Improper Coeffs"].at[2, "coeff1"], 20.000000)
self.assertEqual(pep.atoms.at[29, "molecule-ID"], 1)
self.assertEqual(pep.atoms.at[29, "type"], 7)
self.assertEqual(pep.atoms.at[29, "q"], -0.020)
self.assertAlmostEqual(pep.atoms.at[29, "x"], 42.96709)
self.assertEqual(pep.atoms.at[1808, "molecule-ID"], 576)
self.assertEqual(pep.atoms.at[1808, "type"], 14)
self.assertAlmostEqual(pep.atoms.at[1808, "y"], 58.64352)
self.assertEqual(pep.atoms.at[1808, "nx"], -1)
self.assertAlmostEqual(pep.velocities.at[527, "vz"], -0.010889)
self.assertEqual(topo["Bonds"].at[47, "type"], 8)
self.assertEqual(topo["Bonds"].at[47, "atom2"], 54)
self.assertEqual(topo["Bonds"].at[953, "atom1"], 1384)
self.assertEqual(topo["Angles"].at[105, "type"], 19)
self.assertEqual(topo["Angles"].at[105, "atom3"], 51)
self.assertEqual(topo["Angles"].at[376, "atom2"], 772)
self.assertEqual(topo["Dihedrals"].at[151, "type"], 14)
self.assertEqual(topo["Dihedrals"].at[151, "atom4"], 51)
self.assertEqual(topo["Impropers"].at[4, "atom4"], 32)
# class 2 and comments
ethane = self.ethane
self.assertEqual(ethane.masses.shape, (2, 1))
self.assertEqual(ethane.atoms.shape, (8, 9))
class2 = ethane.force_field
self.assertEqual(class2["Pair Coeffs"].shape, (2, 2))
self.assertEqual(class2["Bond Coeffs"].shape, (2, 4))
self.assertEqual(class2["Angle Coeffs"].shape, (2, 4))
self.assertEqual(class2["Dihedral Coeffs"].shape, (1, 6))
self.assertEqual(class2["Improper Coeffs"].shape, (2, 2))
self.assertEqual(class2["BondBond Coeffs"].at[2, "coeff3"], 1.1010)
self.assertEqual(class2["BondAngle Coeffs"].at[2, "coeff4"], 1.1010)
self.assertEqual(class2["AngleAngle Coeffs"].at[2, "coeff6"], 107.6600)
self.assertEqual(class2["AngleAngle Coeffs"].at[2, "coeff6"], 107.6600)
self.assertEqual(class2["AngleAngleTorsion Coeffs"].at[1, "coeff3"], 110.7700)
self.assertEqual(class2["EndBondTorsion Coeffs"].at[1, "coeff8"], 1.1010)
self.assertEqual(class2["MiddleBondTorsion Coeffs"].at[1, "coeff4"], 1.5300)
self.assertEqual(class2["BondBond13 Coeffs"].at[1, "coeff3"], 1.1010)
self.assertEqual(class2["AngleTorsion Coeffs"].at[1, "coeff8"], 110.7700)
# tilt box and another atom_style
quartz = self.quartz
np.testing.assert_array_equal(quartz.box.tilt, [-2.456700, 0.0, 0.0])
self.assertListEqual(list(quartz.atoms.columns), ["type", "x", "y", "z"])
self.assertAlmostEqual(quartz.atoms.at[7, "x"], 0.299963)
# PairIJ Coeffs section
virus = self.virus
pairij = virus.force_field["PairIJ Coeffs"]
self.assertEqual(pairij.at[7, "id1"], 3)
self.assertEqual(pairij.at[7, "id2"], 3)
self.assertEqual(pairij.at[7, "coeff2"], 2.1)
# sort_id
atom_id = random.randint(1, 384)
self.assertEqual(self.tatb.atoms.loc[atom_id].name, atom_id)
def test_from_ff_and_topologies(self):
mass = OrderedDict()
mass["H"] = 1.0079401
mass["O"] = 15.999400
nonbond_coeffs = [[0.00774378, 0.98], [0.1502629, 3.1169]]
topo_coeffs = {
"Bond Coeffs": [{"coeffs": [176.864, 0.9611], "types": [("H", "O")]}],
"Angle Coeffs": [{"coeffs": [42.1845, 109.4712], "types": [("H", "O", "H")]}],
}
ff = ForceField(mass.items(), nonbond_coeffs, topo_coeffs)
with gzip.open(os.path.join(test_dir, "topologies_ice.json.gz")) as f:
topo_dicts = json.load(f)
topologies = [Topology.from_dict(d) for d in topo_dicts]
box = LammpsBox([[-0.75694412, 44.165558], [0.38127473, 47.066074], [0.17900842, 44.193867]])
ice = LammpsData.from_ff_and_topologies(box=box, ff=ff, topologies=topologies)
atoms = ice.atoms
bonds = ice.topology["Bonds"]
angles = ice.topology["Angles"]
np.testing.assert_array_equal(atoms.index.values, np.arange(1, len(atoms) + 1))
np.testing.assert_array_equal(bonds.index.values, np.arange(1, len(bonds) + 1))
np.testing.assert_array_equal(angles.index.values, np.arange(1, len(angles) + 1))
i = random.randint(0, len(topologies) - 1)
sample = topologies[i]
in_atoms = ice.atoms[ice.atoms["molecule-ID"] == i + 1]
np.testing.assert_array_equal(in_atoms.index.values, np.arange(3 * i + 1, 3 * i + 4))
np.testing.assert_array_equal(in_atoms["type"].values, [2, 1, 1])
np.testing.assert_array_equal(in_atoms["q"].values, sample.charges)
np.testing.assert_array_equal(in_atoms[["x", "y", "z"]].values, sample.sites.cart_coords)
broken_topo_coeffs = {
"Bond Coeffs": [{"coeffs": [176.864, 0.9611], "types": [("H", "O")]}],
"Angle Coeffs": [{"coeffs": [42.1845, 109.4712], "types": [("H", "H", "H")]}],
}
broken_ff = ForceField(mass.items(), nonbond_coeffs, broken_topo_coeffs)
ld_woangles = LammpsData.from_ff_and_topologies(box=box, ff=broken_ff, topologies=[sample])
self.assertNotIn("Angles", ld_woangles.topology)
def test_from_structure(self):
latt = Lattice.monoclinic(9.78746, 4.75058, 8.95892, 115.9693)
structure = Structure.from_spacegroup(
15,
latt,
["Os", "O", "O"],
[
[0, 0.25583, 0.75],
[0.11146, 0.46611, 0.91631],
[0.11445, 0.04564, 0.69518],
],
)
velocities = np.random.randn(20, 3) * 0.1
structure.add_site_property("velocities", velocities)
ld = LammpsData.from_structure(structure=structure, ff_elements=["O", "Os", "Na"])
i = random.randint(0, 19)
a = latt.matrix[0]
va = velocities[i].dot(a) / np.linalg.norm(a)
self.assertAlmostEqual(va, ld.velocities.loc[i + 1, "vx"])
self.assertAlmostEqual(velocities[i, 1], ld.velocities.loc[i + 1, "vy"])
np.testing.assert_array_almost_equal(ld.masses["mass"], [22.989769, 190.23, 15.9994])
np.testing.assert_array_equal(ld.atoms["type"], [2] * 4 + [3] * 16)
def test_json_dict(self):
encoded = json.dumps(self.ethane.as_dict())
decoded = json.loads(encoded)
c2h6 = LammpsData.from_dict(decoded)
pd.testing.assert_frame_equal(c2h6.masses, self.ethane.masses)
pd.testing.assert_frame_equal(c2h6.atoms, self.ethane.atoms)
ff = self.ethane.force_field
key, target_df = random.sample(sorted(ff.items()), 1)[0]
self.assertIsNone(
pd.testing.assert_frame_equal(c2h6.force_field[key], target_df, check_dtype=False),
key,
)
topo = self.ethane.topology
key, target_df = random.sample(sorted(topo.items()), 1)[0]
self.assertIsNone(pd.testing.assert_frame_equal(c2h6.topology[key], target_df), key)
@classmethod
def tearDownClass(cls):
tmpfiles = ["test1.data", "test2.data"]
for t in tmpfiles:
if os.path.exists(t):
os.remove(t)
class TopologyTest(unittest.TestCase):
def test_init(self):
inner_charge = np.random.rand(10) - 0.5
outer_charge = np.random.rand(10) - 0.5
inner_velo = np.random.rand(10, 3) - 0.5
outer_velo = np.random.rand(10, 3) - 0.5
m = Molecule(
["H"] * 10,
np.random.rand(10, 3) * 100,
site_properties={
"ff_map": ["D"] * 10,
"charge": inner_charge,
"velocities": inner_velo,
},
)
# q and v from site properties, while type from species_string
topo = Topology(sites=m)
self.assertListEqual(topo.type_by_sites, ["H"] * 10)
np.testing.assert_array_equal(topo.charges, inner_charge)
np.testing.assert_array_equal(topo.velocities, inner_velo)
# q and v from overriding, while type from site property
topo_override = Topology(sites=m, ff_label="ff_map", charges=outer_charge, velocities=outer_velo)
self.assertListEqual(topo_override.type_by_sites, ["D"] * 10)
np.testing.assert_array_equal(topo_override.charges, outer_charge)
np.testing.assert_array_equal(topo_override.velocities, outer_velo)
# test using a list of sites instead of SiteCollection
topo_from_list = Topology(sites=m.sites)
self.assertListEqual(topo_from_list.type_by_sites, topo.type_by_sites)
np.testing.assert_array_equal(topo_from_list.charges, topo.charges)
np.testing.assert_array_equal(topo_from_list.velocities, topo.velocities)
def test_from_bonding(self):
# He: no bonding topologies
helium = Molecule(["He"], [[0, 0, 0]])
topo_he = Topology.from_bonding(molecule=helium)
self.assertIsNone(topo_he.topologies)
# H2: 1 bond only
hydrogen = Molecule(["H"] * 2, [[0, 0, 0], [0, 0, 0.7414]])
topo_h = Topology.from_bonding(molecule=hydrogen)
tp_h = topo_h.topologies
self.assertListEqual(tp_h["Bonds"], [[0, 1]])
self.assertNotIn("Angles", tp_h)
self.assertNotIn("Dihedrals", tp_h)
# water: 2 bonds and 1 angle only
water = Molecule(
["O", "H", "H"],
[
[0.0000, 0.0000, 0.1173],
[0.0000, 0.7572, -0.4692],
[0.0000, -0.7572, -0.4692],
],
)
topo_water = Topology.from_bonding(molecule=water)
tp_water = topo_water.topologies
self.assertListEqual(tp_water["Bonds"], [[0, 1], [0, 2]])
self.assertListEqual(tp_water["Angles"], [[1, 0, 2]])
self.assertNotIn("Dihedrals", tp_water)
# EtOH
etoh = Molecule(
["C", "C", "O", "H", "H", "H", "H", "H", "H"],
[
[1.1879, -0.3829, 0.0000],
[0.0000, 0.5526, 0.0000],
[-1.1867, -0.2472, 0.0000],
[-1.9237, 0.3850, 0.0000],
[2.0985, 0.2306, 0.0000],
[1.1184, -1.0093, 0.8869],
[1.1184, -1.0093, -0.8869],
[-0.0227, 1.1812, 0.8852],
[-0.0227, 1.1812, -0.8852],
],
)
topo_etoh = Topology.from_bonding(molecule=etoh)
tp_etoh = topo_etoh.topologies
self.assertEqual(len(tp_etoh["Bonds"]), 8)
etoh_bonds = [[0, 1], [0, 4], [0, 5], [0, 6], [1, 2], [1, 7], [1, 8], [2, 3]]
np.testing.assert_array_equal(tp_etoh["Bonds"], etoh_bonds)
self.assertEqual(len(tp_etoh["Angles"]), 13)
etoh_angles = [
[1, 0, 4],
[1, 0, 5],
[1, 0, 6],
[4, 0, 5],
[4, 0, 6],
[5, 0, 6],
[0, 1, 2],
[0, 1, 7],
[0, 1, 8],
[2, 1, 7],
[2, 1, 8],
[7, 1, 8],
[1, 2, 3],
]
np.testing.assert_array_equal(tp_etoh["Angles"], etoh_angles)
self.assertEqual(len(tp_etoh["Dihedrals"]), 12)
etoh_dihedrals = [
[4, 0, 1, 2],
[4, 0, 1, 7],
[4, 0, 1, 8],
[5, 0, 1, 2],
[5, 0, 1, 7],
[5, 0, 1, 8],
[6, 0, 1, 2],
[6, 0, 1, 7],
[6, 0, 1, 8],
[0, 1, 2, 3],
[7, 1, 2, 3],
[8, 1, 2, 3],
]
np.testing.assert_array_equal(tp_etoh["Dihedrals"], etoh_dihedrals)
self.assertIsNotNone(json.dumps(topo_etoh.as_dict()))
# bond flag to off
topo_etoh0 = Topology.from_bonding(molecule=etoh, bond=False, angle=True, dihedral=True)
self.assertIsNone(topo_etoh0.topologies)
# angle or dihedral flag to off
topo_etoh1 = Topology.from_bonding(molecule=etoh, angle=False)
self.assertNotIn("Angles", topo_etoh1.topologies)
topo_etoh2 = Topology.from_bonding(molecule=etoh, dihedral=False)
self.assertNotIn("Dihedrals", topo_etoh2.topologies)
class ForceFieldTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
mass_info = [
("A", "H"),
("B", Element("C")),
("C", Element("O")),
("D", 1.00794),
]
nonbond_coeffs = [
[1, 1, 1.1225],
[1, 1.175, 1.31894],
[1, 1.55, 1.73988],
[1, 1, 1.1225],
[1, 1.35, 4],
[1, 1.725, 1.93631],
[1, 1.175, 1.31894],
[1, 2.1, 4],
[1, 1.55, 1.73988],
[1, 1, 1.1225],
]
topo_coeffs = {
"Bond Coeffs": [
{"coeffs": [50, 0.659469], "types": [("A", "B"), ("C", "D")]},
{"coeffs": [50, 0.855906], "types": [("B", "C")]},
]
}
cls.virus = ForceField(mass_info=mass_info, nonbond_coeffs=nonbond_coeffs, topo_coeffs=topo_coeffs)
cls.ethane = ForceField.from_file(os.path.join(test_dir, "ff_ethane.yaml"))
def test_init(self):
v = self.virus
self.assertListEqual(
v.mass_info,
[("A", 1.00794), ("B", 12.0107), ("C", 15.9994), ("D", 1.00794)],
)
self.assertEqual(v.masses.at[3, "mass"], 15.9994)
v_ff = v.force_field
self.assertNotIn("Pair Coeffs", v_ff)
self.assertEqual(v_ff["PairIJ Coeffs"].iat[5, 4], 1.93631)
self.assertEqual(v_ff["Bond Coeffs"].at[2, "coeff2"], 0.855906)
v_maps = v.maps
self.assertDictEqual(v_maps["Atoms"], {"A": 1, "B": 2, "C": 3, "D": 4})
self.assertDictEqual(
v_maps["Bonds"],
{
("A", "B"): 1,
("C", "D"): 1,
("B", "A"): 1,
("D", "C"): 1,
("B", "C"): 2,
("C", "B"): 2,
},
)
e = self.ethane
self.assertEqual(e.masses.at[1, "mass"], 12.01115)
e_ff = e.force_field
self.assertNotIn("PairIJ Coeffs", e_ff)
self.assertEqual(e_ff["Pair Coeffs"].at[1, "coeff2"], 3.854)
self.assertEqual(e_ff["Bond Coeffs"].at[2, "coeff4"], 844.6)
self.assertEqual(e_ff["Angle Coeffs"].at[2, "coeff4"], -2.4318)
self.assertEqual(e_ff["Dihedral Coeffs"].at[1, "coeff1"], -0.1432)
self.assertEqual(e_ff["Improper Coeffs"].at[2, "coeff2"], 0.0)
self.assertEqual(e_ff["BondBond Coeffs"].at[2, "coeff1"], 5.3316)
self.assertEqual(e_ff["BondAngle Coeffs"].at[1, "coeff3"], 1.53)
self.assertEqual(e_ff["MiddleBondTorsion Coeffs"].at[1, "coeff1"], -14.261)
self.assertEqual(e_ff["EndBondTorsion Coeffs"].at[1, "coeff1"], 0.213)
self.assertEqual(e_ff["AngleTorsion Coeffs"].at[1, "coeff3"], -0.2466)
self.assertEqual(e_ff["AngleAngleTorsion Coeffs"].at[1, "coeff1"], -12.564)
self.assertEqual(e_ff["BondBond13 Coeffs"].at[1, "coeff1"], 0.0)
self.assertEqual(e_ff["AngleAngle Coeffs"].at[1, "coeff2"], -0.4825)
e_maps = e.maps
self.assertDictEqual(e_maps["Atoms"], {"c4": 1, "h1": 2})
self.assertDictEqual(e_maps["Bonds"], {("c4", "c4"): 1, ("c4", "h1"): 2, ("h1", "c4"): 2})
self.assertDictEqual(
e_maps["Angles"],
{("c4", "c4", "h1"): 1, ("h1", "c4", "c4"): 1, ("h1", "c4", "h1"): 2},
)
self.assertEqual(
e_maps["Impropers"],
{
("c4", "c4", "h1", "h1"): 1,
("c4", "h1", "c4", "h1"): 1,
("h1", "h1", "c4", "c4"): 1,
("h1", "c4", "h1", "c4"): 1,
("h1", "c4", "h1", "h1"): 2,
("h1", "h1", "c4", "h1"): 2,
},
)
def test_to_file(self):
filename = "ff_test.yaml"
v = self.virus
v.to_file(filename=filename)
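# Note: yaml.YAML is the ruamel.yaml API; the plain PyYAML fallback
# imported at the top of this file does not provide yaml.YAML.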
yml = yaml.YAML(typ="safe")
with open(filename, "r") as f:
d = yml.load(f)
self.assertListEqual(d["mass_info"], [list(m) for m in v.mass_info])
self.assertListEqual(d["nonbond_coeffs"], v.nonbond_coeffs)
def test_from_file(self):
e = self.ethane
self.assertListEqual(e.mass_info, [("c4", 12.01115), ("h1", 1.00797)])
np.testing.assert_array_equal(e.nonbond_coeffs, [[0.062, 3.854], [0.023, 2.878]])
e_tc = e.topo_coeffs
self.assertIn("Bond Coeffs", e_tc)
self.assertIn("BondAngle Coeffs", e_tc["Angle Coeffs"][0])
self.assertIn("BondBond Coeffs", e_tc["Angle Coeffs"][0])
self.assertIn("AngleAngleTorsion Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("AngleTorsion Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("BondBond13 Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("EndBondTorsion Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("MiddleBondTorsion Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("AngleAngle Coeffs", e_tc["Improper Coeffs"][0])
def test_from_dict(self):
d = self.ethane.as_dict()
json_str = json.dumps(d)
decoded = ForceField.from_dict(json.loads(json_str))
self.assertListEqual(decoded.mass_info, self.ethane.mass_info)
self.assertListEqual(decoded.nonbond_coeffs, self.ethane.nonbond_coeffs)
self.assertDictEqual(decoded.topo_coeffs, self.ethane.topo_coeffs)
@classmethod
def tearDownClass(cls):
if os.path.exists("ff_test.yaml"):
os.remove("ff_test.yaml")
class FuncTest(unittest.TestCase):
def test_lattice_2_lmpbox(self):
matrix = np.diag(np.random.randint(5, 14, size=(3,))) + np.random.rand(3, 3) * 0.2 - 0.1
init_latt = Lattice(matrix)
frac_coords = np.random.rand(10, 3)
init_structure = Structure(init_latt, ["H"] * 10, frac_coords)
origin = np.random.rand(3) * 10 - 5
box, symmop = lattice_2_lmpbox(lattice=init_latt, origin=origin)
boxed_latt = box.to_lattice()
np.testing.assert_array_almost_equal(init_latt.abc, boxed_latt.abc)
np.testing.assert_array_almost_equal(init_latt.angles, boxed_latt.angles)
cart_coords = symmop.operate_multi(init_structure.cart_coords) - origin
boxed_structure = Structure(boxed_latt, ["H"] * 10, cart_coords, coords_are_cartesian=True)
np.testing.assert_array_almost_equal(boxed_structure.frac_coords, frac_coords)
tetra_latt = Lattice.tetragonal(5, 5)
tetra_box, _ = lattice_2_lmpbox(tetra_latt)
self.assertIsNone(tetra_box.tilt)
ortho_latt = Lattice.orthorhombic(5, 5, 5)
ortho_box, _ = lattice_2_lmpbox(ortho_latt)
self.assertIsNone(ortho_box.tilt)
rot_tetra_latt = Lattice([[5, 0, 0], [0, 2, 2], [0, -2, 2]])
_, rotop = lattice_2_lmpbox(rot_tetra_latt)
np.testing.assert_array_almost_equal(
rotop.rotation_matrix,
[
[1, 0, 0],
[0, 2 ** 0.5 / 2, 2 ** 0.5 / 2],
[0, -(2 ** 0.5) / 2, 2 ** 0.5 / 2],
],
)
@unittest.skip("The function is deprecated")
def test_structure_2_lmpdata(self):
matrix = np.diag(np.random.randint(5, 14, size=(3,))) + np.random.rand(3, 3) * 0.2 - 0.1
latt = Lattice(matrix)
frac_coords = np.random.rand(10, 3)
structure = Structure(latt, ["H"] * 10, frac_coords)
ld = structure_2_lmpdata(structure=structure)
box_tilt = [0.0, 0.0, 0.0] if not ld.box_tilt else ld.box_tilt
box_bounds = np.array(ld.box_bounds)
np.testing.assert_array_equal(box_bounds[:, 0], np.zeros(3))
new_matrix = np.diag(box_bounds[:, 1])
new_matrix[1, 0] = box_tilt[0]
new_matrix[2, 0] = box_tilt[1]
new_matrix[2, 1] = box_tilt[2]
new_latt = Lattice(new_matrix)
np.testing.assert_array_almost_equal(new_latt.abc, latt.abc)
np.testing.assert_array_almost_equal(new_latt.angles, latt.angles)
coords = ld.atoms[["x", "y", "z"]].values
new_structure = Structure(new_latt, ["H"] * 10, coords, coords_are_cartesian=True)
np.testing.assert_array_almost_equal(new_structure.frac_coords, frac_coords)
self.assertEqual(len(ld.masses), 1)
# test additional elements
ld_elements = structure_2_lmpdata(structure=structure, ff_elements=["C", "H"])
self.assertEqual(len(ld_elements.masses), 2)
np.testing.assert_array_almost_equal(ld_elements.masses["mass"], [1.00794, 12.01070])
class CombinedDataTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ec = LammpsData.from_file(filename=os.path.join(test_dir, "ec.data"))
cls.fec = LammpsData.from_file(filename=os.path.join(test_dir, "fec.data"))
cls.coord = CombinedData.parse_xyz(filename=os.path.join(test_dir, "ec_fec.xyz"))
cls.ec_fec1 = CombinedData.from_files(
os.path.join(test_dir, "ec_fec.xyz"),
[1200, 300],
os.path.join(test_dir, "ec.data"),
os.path.join(test_dir, "fec.data"),
)
cls.ec_fec2 = CombinedData.from_lammpsdata([cls.ec, cls.fec], ["EC", "FEC"], [1200, 300], cls.coord)
def test_from_files(self):
# general tests
ec_fec = self.ec_fec1
        # header stats and numbers of columns
self.assertEqual(ec_fec.names, ["cluster1", "cluster2"])
self.assertEqual(ec_fec.nums, [1200, 300])
self.assertEqual(ec_fec.masses.shape, (12, 1))
self.assertEqual(ec_fec.atoms.shape, (15000, 6))
self.assertListEqual(list(ec_fec.atoms.columns), ["molecule-ID", "type", "q", "x", "y", "z"])
topo = ec_fec.topology
self.assertEqual(topo["Bonds"].shape, (15000, 3))
self.assertEqual(topo["Angles"].shape, (25500, 4))
self.assertEqual(topo["Dihedrals"].shape, (42000, 5))
self.assertEqual(topo["Impropers"].shape, (1500, 5))
ff = ec_fec.force_field
self.assertEqual(ff["Pair Coeffs"].shape, (12, 2))
self.assertEqual(ff["Bond Coeffs"].shape, (15, 2))
self.assertEqual(ff["Angle Coeffs"].shape, (24, 2))
self.assertEqual(ff["Dihedral Coeffs"].shape, (39, 6))
self.assertEqual(ff["Improper Coeffs"].shape, (2, 3))
# header box
np.testing.assert_array_equal(
ec_fec.box.bounds,
[[-0.597365, 54.56835], [-0.597365, 54.56835], [-0.597365, 54.56835]],
)
# body
self.assertEqual(ec_fec.masses.at[7, "mass"], 1.008)
self.assertEqual(ff["Pair Coeffs"].at[9, "coeff2"], 3.750)
self.assertEqual(ff["Bond Coeffs"].at[5, "coeff2"], 1.0900)
self.assertEqual(ff["Angle Coeffs"].at[24, "coeff2"], 108.46005)
self.assertTrue(np.isnan(ff["Dihedral Coeffs"].at[30, "coeff6"]))
self.assertEqual(ff["Improper Coeffs"].at[2, "coeff1"], 10.5)
self.assertEqual(ec_fec.atoms.at[29, "molecule-ID"], 3)
self.assertEqual(ec_fec.atoms.at[29, "type"], 5)
self.assertEqual(ec_fec.atoms.at[29, "q"], 0.0755)
self.assertAlmostEqual(ec_fec.atoms.at[29, "x"], 14.442260)
self.assertEqual(ec_fec.atoms.at[14958, "molecule-ID"], 1496)
self.assertEqual(ec_fec.atoms.at[14958, "type"], 11)
self.assertAlmostEqual(ec_fec.atoms.at[14958, "y"], 41.010962)
self.assertEqual(topo["Bonds"].at[47, "type"], 5)
self.assertEqual(topo["Bonds"].at[47, "atom2"], 47)
self.assertEqual(topo["Bonds"].at[953, "atom1"], 951)
self.assertEqual(topo["Angles"].at[105, "type"], 2)
self.assertEqual(topo["Angles"].at[105, "atom3"], 63)
self.assertEqual(topo["Angles"].at[14993, "atom2"], 8815)
self.assertEqual(topo["Dihedrals"].at[151, "type"], 4)
self.assertEqual(topo["Dihedrals"].at[151, "atom4"], 55)
self.assertEqual(topo["Dihedrals"].at[41991, "type"], 30)
self.assertEqual(topo["Dihedrals"].at[41991, "atom2"], 14994)
self.assertEqual(topo["Impropers"].at[4, "atom4"], 34)
def test_from_lammpsdata(self):
# general tests
ec_fec = self.ec_fec2
        # header stats and numbers of columns
self.assertEqual(ec_fec.names, ["EC", "FEC"])
self.assertEqual(ec_fec.nums, [1200, 300])
self.assertEqual(ec_fec.masses.shape, (12, 1))
self.assertEqual(ec_fec.atoms.shape, (15000, 6))
self.assertListEqual(list(ec_fec.atoms.columns), ["molecule-ID", "type", "q", "x", "y", "z"])
topo = ec_fec.topology
self.assertEqual(topo["Bonds"].shape, (15000, 3))
self.assertEqual(topo["Angles"].shape, (25500, 4))
self.assertEqual(topo["Dihedrals"].shape, (42000, 5))
self.assertEqual(topo["Impropers"].shape, (1500, 5))
ff = ec_fec.force_field
self.assertEqual(ff["Pair Coeffs"].shape, (12, 2))
self.assertEqual(ff["Bond Coeffs"].shape, (15, 2))
self.assertEqual(ff["Angle Coeffs"].shape, (24, 2))
self.assertEqual(ff["Dihedral Coeffs"].shape, (39, 6))
self.assertEqual(ff["Improper Coeffs"].shape, (2, 3))
# header box
np.testing.assert_array_equal(
ec_fec.box.bounds,
[[-0.597365, 54.56835], [-0.597365, 54.56835], [-0.597365, 54.56835]],
)
# body
self.assertEqual(ec_fec.masses.at[7, "mass"], 1.008)
self.assertEqual(ff["Pair Coeffs"].at[9, "coeff2"], 3.750)
self.assertEqual(ff["Bond Coeffs"].at[5, "coeff2"], 1.0900)
self.assertEqual(ff["Angle Coeffs"].at[24, "coeff2"], 108.46005)
self.assertTrue(np.isnan(ff["Dihedral Coeffs"].at[30, "coeff6"]))
self.assertEqual(ff["Improper Coeffs"].at[2, "coeff1"], 10.5)
self.assertEqual(ec_fec.atoms.at[29, "molecule-ID"], 3)
self.assertEqual(ec_fec.atoms.at[29, "type"], 5)
self.assertEqual(ec_fec.atoms.at[29, "q"], 0.0755)
self.assertAlmostEqual(ec_fec.atoms.at[29, "x"], 14.442260)
self.assertEqual(ec_fec.atoms.at[14958, "molecule-ID"], 1496)
self.assertEqual(ec_fec.atoms.at[14958, "type"], 11)
self.assertAlmostEqual(ec_fec.atoms.at[14958, "y"], 41.010962)
self.assertEqual(topo["Bonds"].at[47, "type"], 5)
self.assertEqual(topo["Bonds"].at[47, "atom2"], 47)
self.assertEqual(topo["Bonds"].at[953, "atom1"], 951)
self.assertEqual(topo["Angles"].at[105, "type"], 2)
self.assertEqual(topo["Angles"].at[105, "atom3"], 63)
self.assertEqual(topo["Angles"].at[14993, "atom2"], 8815)
self.assertEqual(topo["Dihedrals"].at[151, "type"], 4)
self.assertEqual(topo["Dihedrals"].at[151, "atom4"], 55)
self.assertEqual(topo["Dihedrals"].at[41991, "type"], 30)
self.assertEqual(topo["Dihedrals"].at[41991, "atom2"], 14994)
self.assertEqual(topo["Impropers"].at[4, "atom4"], 34)
        # non-destructive use of input (ID numbers)
fec = self.fec
topo = fec.topology
ff = fec.force_field
self.assertEqual(ff["Pair Coeffs"].index[0], 1)
self.assertEqual(ff["Bond Coeffs"].index[0], 1)
self.assertEqual(ff["Angle Coeffs"].index[0], 1)
self.assertTrue(ff["Dihedral Coeffs"].index[0], 1)
self.assertEqual(ff["Improper Coeffs"].index[0], 1)
self.assertEqual(fec.atoms.index[0], 1)
self.assertEqual(fec.atoms.at[1, "molecule-ID"], 1)
self.assertEqual(fec.atoms.at[1, "type"], 1)
self.assertEqual(topo["Bonds"].index[0], 1)
self.assertEqual(topo["Bonds"].at[1, "type"], 1)
self.assertEqual(topo["Bonds"].at[1, "atom1"], 1)
self.assertEqual(topo["Bonds"].at[1, "atom2"], 2)
self.assertEqual(topo["Angles"].index[0], 1)
self.assertEqual(topo["Angles"].at[1, "atom1"], 1)
self.assertEqual(topo["Angles"].at[1, "atom2"], 3)
self.assertEqual(topo["Angles"].at[1, "atom3"], 4)
self.assertEqual(topo["Dihedrals"].index[0], 1)
self.assertEqual(topo["Dihedrals"].at[1, "atom1"], 1)
self.assertEqual(topo["Dihedrals"].at[1, "atom2"], 3)
self.assertEqual(topo["Dihedrals"].at[1, "atom3"], 4)
self.assertEqual(topo["Dihedrals"].at[1, "atom4"], 5)
self.assertEqual(topo["Impropers"].index[0], 1)
self.assertEqual(topo["Impropers"].at[1, "atom1"], 5)
self.assertEqual(topo["Impropers"].at[1, "atom2"], 4)
self.assertEqual(topo["Impropers"].at[1, "atom3"], 3)
self.assertEqual(topo["Impropers"].at[1, "atom4"], 6)
def test_get_string(self):
# general tests
ec_fec_lines = self.ec_fec1.get_string().splitlines()
# header information
self.assertEqual(ec_fec_lines[1], "# 1200 cluster1 + 300 cluster2")
# data type consistency tests
self.assertEqual(ec_fec_lines[98], "1 harmonic 3.200000000 -1 2")
self.assertEqual(ec_fec_lines[109], "12 charmm 2.700000000 2 180 0.0")
self.assertEqual(
ec_fec_lines[113],
"16 multi/harmonic 0.382999522 -1.148998570 0.000000000 1.531998090 0.000000000",
)
self.assertEqual(ec_fec_lines[141], "1 10.5 -1 2")
if __name__ == "__main__":
unittest.main()
|
richardtran415/pymatgen
|
pymatgen/io/lammps/tests/test_data.py
|
Python
|
mit
| 45,469
|
[
"CHARMM",
"LAMMPS",
"pymatgen"
] |
43861f67ffab0a413aa45fb7a0f43a86f0e5b6e8064b16e68eecdf47c4f921cd
|
import json
import os
import unittest
from io import open
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.phonon.dos import CompletePhononDos
from pymatgen.phonon.plotter import PhononBSPlotter, PhononDosPlotter, ThermoPlotter
from pymatgen.util.testing import PymatgenTest
class PhononDosPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "NaCl_complete_ph_dos.json"), "r") as f:
self.dos = CompletePhononDos.from_dict(json.load(f))
self.plotter = PhononDosPlotter(sigma=0.2, stack=True)
self.plotter_nostack = PhononDosPlotter(sigma=0.2, stack=False)
def test_add_dos_dict(self):
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 0)
self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 2)
def test_get_dos_dict(self):
self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
for el in ["Na", "Cl"]:
self.assertIn(el, d)
def test_plot(self):
# Disabling latex for testing.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.add_dos("Total", self.dos)
self.plotter.get_plot(units="mev")
self.plotter_nostack.add_dos("Total", self.dos)
self.plotter_nostack.get_plot(units="mev")
class PhononBSPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "NaCl_phonon_bandstructure.json"), "r") as f:
d = json.loads(f.read())
self.bs = PhononBandStructureSymmLine.from_dict(d)
self.plotter = PhononBSPlotter(self.bs)
def test_bs_plot_data(self):
self.assertEqual(
len(self.plotter.bs_plot_data()["distances"][0]),
51,
"wrong number of distances in the first branch",
)
self.assertEqual(len(self.plotter.bs_plot_data()["distances"]), 4, "wrong number of branches")
self.assertEqual(
sum([len(e) for e in self.plotter.bs_plot_data()["distances"]]),
204,
"wrong number of distances",
)
self.assertEqual(self.plotter.bs_plot_data()["ticks"]["label"][4], "Y", "wrong tick label")
self.assertEqual(
len(self.plotter.bs_plot_data()["ticks"]["label"]),
8,
"wrong number of tick labels",
)
def test_plot(self):
# Disabling latex for testing.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.get_plot(units="mev")
def test_plot_compare(self):
# Disabling latex for testing.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.plot_compare(self.plotter, units="mev")
class ThermoPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "NaCl_complete_ph_dos.json"), "r") as f:
self.dos = CompletePhononDos.from_dict(json.load(f))
self.plotter = ThermoPlotter(self.dos, self.dos.structure)
def test_plot_functions(self):
# Disabling latex for testing.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.plot_cv(5, 100, 5, show=False)
self.plotter.plot_entropy(5, 100, 5, show=False)
self.plotter.plot_internal_energy(5, 100, 5, show=False)
self.plotter.plot_helmholtz_free_energy(5, 100, 5, show=False)
self.plotter.plot_thermodynamic_properties(5, 100, 5, show=False, fig_close=True)
# Gruneisen plotter is already tested in test_gruneisen
if __name__ == "__main__":
unittest.main()
|
gmatteo/pymatgen
|
pymatgen/phonon/tests/test_plotter.py
|
Python
|
mit
| 3,863
|
[
"pymatgen"
] |
6358eb3c4389f72cc746547ee5ffbb070e01377235015714f020f27225ffbd5b
|
"""
Python implementation of the LiNGAM algorithms.
The LiNGAM Project: https://sites.google.com/site/sshimizu06/lingam
"""
import itertools
import numbers
import warnings
import numpy as np
from sklearn.utils import check_array, resample
from .bootstrap import BootstrapResult
from .direct_lingam import DirectLiNGAM
from .hsic import hsic_test_gamma
from .utils import predict_adaptive_lasso
class MultiGroupDirectLiNGAM(DirectLiNGAM):
"""Implementation of DirectLiNGAM Algorithm with multiple groups [1]_
References
----------
.. [1] S. Shimizu. Joint estimation of linear non-Gaussian acyclic models. Neurocomputing, 81: 104-107, 2012.
"""
def __init__(
self,
random_state=None,
prior_knowledge=None,
apply_prior_knowledge_softly=False,
):
"""Construct a model.
Parameters
----------
random_state : int, optional (default=None)
``random_state`` is the seed used by the random number generator.
prior_knowledge : array-like, shape (n_features, n_features), optional (default=None)
Prior knowledge used for causal discovery, where ``n_features`` is the number of features.
The elements of prior knowledge matrix are defined as follows [1]_:
* ``0`` : :math:`x_i` does not have a directed path to :math:`x_j`
* ``1`` : :math:`x_i` has a directed path to :math:`x_j`
* ``-1`` : No prior knowledge is available to know if either of the two cases above (0 or 1) is true.
apply_prior_knowledge_softly : boolean, optional (default=False)
If True, apply prior knowledge softly.
"""
super().__init__(random_state, prior_knowledge, apply_prior_knowledge_softly)
def fit(self, X_list):
"""Fit the model to multiple datasets.
Parameters
----------
X_list : list, shape [X, ...]
            Multiple datasets for training, where ``X`` is a dataset.
            The shape of ``X`` is (n_samples, n_features),
where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
# Check parameters
X_list = self._check_X_list(X_list)
if self._Aknw is not None:
if (self._n_features, self._n_features) != self._Aknw.shape:
raise ValueError(
"The shape of prior knowledge must be (n_features, n_features)"
)
else:
# Extract all partial orders in prior knowledge matrix
if not self._apply_prior_knowledge_softly:
self._partial_orders = self._extract_partial_orders(self._Aknw)
# Causal discovery
U = np.arange(self._n_features)
K = []
X_list_ = [np.copy(X) for X in X_list]
for _ in range(self._n_features):
m = self._search_causal_order(X_list_, U)
for i in U:
if i != m:
for d in range(len(X_list_)):
X_list_[d][:, i] = self._residual(
X_list_[d][:, i], X_list_[d][:, m]
)
K.append(m)
U = U[U != m]
if (self._Aknw is not None) and (not self._apply_prior_knowledge_softly):
self._partial_orders = self._partial_orders[
self._partial_orders[:, 0] != m
]
self._causal_order = K
self._adjacency_matrices = []
for X in X_list:
self._estimate_adjacency_matrix(X, prior_knowledge=self._Aknw)
self._adjacency_matrices.append(self._adjacency_matrix)
return self
def bootstrap(self, X_list, n_sampling):
"""Evaluate the statistical reliability of DAG based on the bootstrapping.
Parameters
----------
X_list : array-like, shape (X, ...)
            Multiple datasets for training, where ``X`` is a dataset.
            The shape of ``X`` is (n_samples, n_features),
where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
n_sampling : int
Number of bootstrapping samples.
Returns
-------
results : array-like, shape (BootstrapResult, ...)
Returns the results of bootstrapping for multiple datasets.
"""
# Check parameters
X_list = self._check_X_list(X_list)
        if not isinstance(n_sampling, (numbers.Integral, np.integer)) or n_sampling <= 0:
            raise ValueError("n_sampling must be an integer greater than 0.")
# Bootstrapping
adjacency_matrices_list = np.zeros(
[len(X_list), n_sampling, self._n_features, self._n_features]
)
total_effects_list = np.zeros(
[len(X_list), n_sampling, self._n_features, self._n_features]
)
for n in range(n_sampling):
resampled_X_list = [resample(X) for X in X_list]
self.fit(resampled_X_list)
for i, am in enumerate(self._adjacency_matrices):
adjacency_matrices_list[i][n] = am
# Calculate total effects
for c, from_ in enumerate(self._causal_order):
for to in self._causal_order[c + 1 :]:
effects = self.estimate_total_effect(resampled_X_list, from_, to)
for i, effect in enumerate(effects):
total_effects_list[i, n, to, from_] = effect
result_list = []
for am, te in zip(adjacency_matrices_list, total_effects_list):
result_list.append(BootstrapResult(am, te))
return result_list
def estimate_total_effect(self, X_list, from_index, to_index):
"""Estimate total effect using causal model.
Parameters
----------
X_list : array-like, shape (X, ...)
            Multiple datasets for training, where ``X`` is a dataset.
            The shape of ``X`` is (n_samples, n_features),
where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
        from_index : int
            Index of source variable to estimate total effect.
        to_index : int
            Index of destination variable to estimate total effect.
Returns
-------
        total_effects : list of float
            Estimated total effects, one per dataset.
"""
# Check parameters
X_list = self._check_X_list(X_list)
# Check from/to causal order
from_order = self._causal_order.index(from_index)
to_order = self._causal_order.index(to_index)
if from_order > to_order:
warnings.warn(
f"The estimated causal effect may be incorrect because "
f"the causal order of the destination variable (to_index={to_index}) "
f"is earlier than the source variable (from_index={from_index})."
)
effects = []
for X, am in zip(X_list, self._adjacency_matrices):
# from_index + parents indices
parents = np.where(np.abs(am[from_index]) > 0)[0]
predictors = [from_index]
predictors.extend(parents)
# Estimate total effect
coefs = predict_adaptive_lasso(X, predictors, to_index)
effects.append(coefs[0])
return effects
def get_error_independence_p_values(self, X_list):
"""Calculate the p-value matrix of independence between error variables.
Parameters
----------
X_list : array-like, shape (X, ...)
            Multiple datasets for training, where ``X`` is a dataset.
            The shape of ``X`` is (n_samples, n_features),
where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
Returns
-------
independence_p_values : array-like, shape (n_datasets, n_features, n_features)
p-value matrix of independence between error variables.
"""
# Check parameters
X_list = self._check_X_list(X_list)
p_values = np.zeros([len(X_list), self._n_features, self._n_features])
for d, (X, am) in enumerate(zip(X_list, self._adjacency_matrices)):
n_samples = X.shape[0]
E = X - np.dot(am, X.T).T
for i, j in itertools.combinations(range(self._n_features), 2):
_, p_value = hsic_test_gamma(
np.reshape(E[:, i], [n_samples, 1]),
np.reshape(E[:, j], [n_samples, 1]),
)
p_values[d, i, j] = p_value
p_values[d, j, i] = p_value
return p_values
def _check_X_list(self, X_list):
"""Check input X list."""
if not isinstance(X_list, list):
raise ValueError("X_list must be a list.")
if len(X_list) < 2:
raise ValueError("X_list must be a list containing at least two items")
self._n_features = check_array(X_list[0]).shape[1]
X_list_ = []
for X in X_list:
X_ = check_array(X)
if X_.shape[1] != self._n_features:
raise ValueError(
"X_list must be a list with the same number of features"
)
X_list_.append(X_)
return np.array(X_list_)
def _search_causal_order(self, X_list, U):
"""Search the causal ordering."""
Uc, Vj = self._search_candidate(U)
if len(Uc) == 1:
return Uc[0]
total_size = 0
for X in X_list:
total_size += len(X)
MG_list = []
for i in Uc:
MG = 0
for X in X_list:
M = 0
for j in U:
if i != j:
xi_std = (X[:, i] - np.mean(X[:, i])) / np.std(X[:, i])
xj_std = (X[:, j] - np.mean(X[:, j])) / np.std(X[:, j])
ri_j = (xi_std if i in Vj and j in Uc else self._residual(xi_std, xj_std))
rj_i = (xj_std if j in Vj and i in Uc else self._residual(xj_std, xi_std))
M += (np.min([0, self._diff_mutual_info(xi_std, xj_std, ri_j, rj_i)]) ** 2)
MG += M * (len(X) / total_size)
MG_list.append(-1.0 * MG)
return Uc[np.argmax(MG_list)]
@property
def adjacency_matrices_(self):
"""Estimated adjacency matrices.
Returns
-------
adjacency_matrices_ : array-like, shape (B, ...)
The list of adjacency matrix B for multiple datasets.
The shape of B is (n_features, n_features), where
n_features is the number of features.
"""
return self._adjacency_matrices
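# A minimal usage sketch (illustrative only; the random data, shapes, and
# variable names below are assumptions, not part of this module):
#
#     import numpy as np
#     from lingam import MultiGroupDirectLiNGAM
#
#     X1 = np.random.uniform(size=(100, 3))  # group 1 (non-Gaussian data)
#     X2 = np.random.uniform(size=(100, 3))  # group 2, same features
#     model = MultiGroupDirectLiNGAM()
#     model.fit([X1, X2])                    # one causal order across groups
#     print(model.causal_order_)             # shared causal ordering
#     print(model.adjacency_matrices_)       # one adjacency matrix per group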
|
cdt15/lingam
|
lingam/multi_group_direct_lingam.py
|
Python
|
mit
| 11,025
|
[
"Gaussian"
] |
6342483f4a4d6bce1f87c0f3901cb33898d15d9bfd246ad4edf033a2bba3dd52
|
# $Id$
#
# Copyright (C) 2006 Greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import Geometry
from rdkit.Chem.FeatMaps import FeatMaps, FeatMapPoint
import re
"""
ScoreMode=All
DirScoreMode=Ignore
BeginParams
family=Aromatic radius=2.5 width=1.0 profile=Gaussian
family=Acceptor radius=1.5
EndParams
# optional
BeginPoints
family=Acceptor pos=(1.0, 0.0, 5.0) weight=1.25 dir=(1, 1, 0)
family=Aromatic pos=(0.0,1.0,0.0) weight=2.0 dir=(0,0,1) dir=(0,0,-1)
family=Acceptor pos=(1.0,1.0,2.0) weight=1.25
EndPoints
"""
class FeatMapParseError(ValueError):
pass
class FeatMapParser(object):
data = None
def __init__(self, file=None, data=None):
if file:
self.data = file.readlines()
elif data:
self.SetData(data)
self._lineNum = 0
def SetData(self, data):
if isinstance(data, str):
self.data = data.split('\n')
else:
self.data = data
self._lineNum = 0
def _NextLine(self):
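    # Returns the next logical line: text after '#' is treated as a comment
    # and dropped, blank lines are skipped, and a line ending in a backslash
    # is joined with the line(s) that follow it.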
txt = ''
while 1:
try:
l = self.data[self._lineNum].split('#')[0].strip()
except IndexError:
break
self._lineNum += 1
if l:
txt += l
if l[-1] != '\\':
break
return txt
def Parse(self, featMap=None):
if featMap is None:
featMap = FeatMaps.FeatMap()
l = self._NextLine().strip()
while l:
splitL = l.split('=')
if len(splitL) == 1:
keyword = splitL[0].strip().lower()
if keyword == 'beginpoints':
pts = self.ParseFeatPointBlock()
for pt in pts:
featMap.AddFeatPoint(pt)
elif keyword == 'beginparams':
featMap.params = self.ParseParamBlock()
else:
raise FeatMapParseError('Unrecognized keyword %s on line %d' % (keyword, self._lineNum))
else:
keyword = splitL[0].strip().lower()
val = splitL[1].strip()
if keyword == 'scoremode':
try:
featMap.scoreMode = getattr(FeatMaps.FeatMapScoreMode, val)
except AttributeError:
raise FeatMapParseError('ScoreMode %s not recognized on line %d' % (val, self._lineNum))
elif keyword == 'dirscoremode':
try:
featMap.dirScoreMode = getattr(FeatMaps.FeatDirScoreMode, val)
except AttributeError:
raise FeatMapParseError('DirScoreMode %s not recognized on line %d' %
(val, self._lineNum))
else:
raise FeatMapParseError('Unrecognized keyword %s on line %d' % (keyword, self._lineNum))
l = self._NextLine().strip()
return featMap
def ParseParamBlock(self):
paramLineSplitter = re.compile(r'([a-zA-Z]+) *= *(\S+)')
params = {}
l = self._NextLine()
while l and l != 'EndParams':
param = FeatMaps.FeatMapParams()
vals = paramLineSplitter.findall(l)
for name, val in vals:
name = name.lower()
if name == 'family':
family = val
elif name == 'radius':
param.radius = float(val)
elif name == 'width':
param.width = float(val)
elif name == 'profile':
try:
param.featProfile = getattr(param.FeatProfile, val)
except AttributeError:
raise FeatMapParseError('Profile %s not recognized on line %d' % (val, self._lineNum))
else:
raise FeatMapParseError('FeatMapParam option %s not recognized on line %d' %
(name, self._lineNum))
params[family] = param
l = self._NextLine()
if l != 'EndParams':
raise FeatMapParseError('EndParams line not found')
return params
def _parsePoint(self, txt):
txt = txt.strip()
startP = 0
endP = len(txt)
if txt[0] == '(':
startP += 1
if txt[-1] == ')':
endP -= 1
txt = txt[startP:endP]
splitL = txt.split(',')
if len(splitL) != 3:
raise ValueError('Bad location string')
vs = [float(x) for x in splitL]
pt = Geometry.Point3D(vs[0], vs[1], vs[2])
return pt
def ParseFeatPointBlock(self):
featLineSplitter = re.compile(r'([a-zA-Z]+) *= *')
feats = []
l = self._NextLine()
while l and l != 'EndPoints':
vals = featLineSplitter.split(l)
while vals.count(''):
vals.remove('')
p = FeatMapPoint.FeatMapPoint()
i = 0
while i < len(vals):
name = vals[i].lower()
if name == 'family':
i += 1
val = vals[i].strip()
p.SetFamily(val)
elif name == 'weight':
i += 1
val = float(vals[i])
p.weight = val
elif name == 'pos':
i += 1
val = vals[i]
pos = self._parsePoint(val)
p.SetPos(pos)
elif name == 'dir':
i += 1
val = vals[i]
pos = self._parsePoint(val)
p.featDirs.append(pos)
else:
raise FeatMapParseError('FeatPoint option %s not recognized on line %d' %
(name, self._lineNum))
i += 1
feats.append(p)
l = self._NextLine()
return feats
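# A minimal usage sketch (illustrative only; 'example.fmap' is a hypothetical
# file in the format shown in the module docstring above):
#
#     with open('example.fmap') as inFile:
#         parser = FeatMapParser(file=inFile)
#         fmap = parser.Parse()
#     print(fmap.GetNumFeatures())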
|
rvianello/rdkit
|
rdkit/Chem/FeatMaps/FeatMapParser.py
|
Python
|
bsd-3-clause
| 5,307
|
[
"Gaussian",
"RDKit"
] |
b95abe06b7adec31cc13f45bb2c1c13ac3ae22e7bd997f1110d3ffe1b045cf39
|
from PIL import ImageFilter
def generate_gaussian_noise_by_level(image, level, width):
"""
Add Gaussian noise of an intended level to an image.
:param image: an image input
    :param level: the percentage of blur (e.g. level 1 means the image is
    blurred with a radius equal to 0.5 percent of its width)
    :param width: the width that the level refers to; pass image.width to use
    the image's own width as the benchmark, or another value otherwise
    :type image: an image file
    :type level: a float from 0.0 to 100.0
    :type width: int (representing pixels)
    """
    return image.filter(ImageFilter.GaussianBlur(radius=int(width * level / 200)))
def generate_gaussian_noise_by_radius(image, radius):
"""
    Add Gaussian noise of an intended radius to an image.
    :param image: an image input
    :param radius: the radius of the blur
    :type image: an image file
    :type radius: int (representing pixels)
    """
    return image.filter(ImageFilter.GaussianBlur(radius=radius))
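# A minimal usage sketch (illustrative only; 'sample.jpg' and the level/radius
# values below are hypothetical):
if __name__ == '__main__':
    from PIL import Image
    img = Image.open('sample.jpg')
    # Level 2 -> radius of int(width * 2 / 200), i.e. 1% of the image's width.
    blurred_by_level = generate_gaussian_noise_by_level(img, 2.0, img.width)
    # An explicit 3-pixel blur radius.
    blurred_by_radius = generate_gaussian_noise_by_radius(img, 3)
    blurred_by_level.save('blurred_by_level.jpg')
    blurred_by_radius.save('blurred_by_radius.jpg')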
|
FlintHill/SUAS-Competition
|
UpdatedImageProcessing/UpdatedImageProcessing/ShapeDetection/utils/gaussian_blur.py
|
Python
|
mit
| 1,054
|
[
"Gaussian"
] |
9e3ee5c30c8a0290e1d89099604124d7c2681cd7426fa864e16a63a9787d34db
|
# Copyright 2022 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Octopus."""
from brax.experimental.composer.components.common import upright_term_fn
ROOT = 'octopus'
SYSTEM_CONFIG = """
bodies {
name: "octopus"
colliders {
position {
z: 0.009999999776482582
}
sphere {
radius: 0.009999999776482582
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_body"
colliders {
position {
z: 0.009999999776482582
}
sphere {
radius: 0.009999999776482582
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_B"
colliders {
position {
x: -0.03700000047683716
z: 0.11810000240802765
}
rotation {
y: -45.00010681152344
}
capsule {
radius: 0.06199999898672104
length: 0.23399999737739563
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_0_B"
colliders {
position {
y: 0.06830000132322311
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.057999998331069946
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_0_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_0_0_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_1_B"
colliders {
position {
y: 0.06830000132322311
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.057999998331069946
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_1_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_1_0_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_2_B"
colliders {
position {
y: 0.06830000132322311
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.057999998331069946
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_2_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_2_0_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_3_B"
colliders {
position {
y: 0.06830000132322311
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.057999998331069946
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_3_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_3_0_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_4_B"
colliders {
position {
y: 0.06830000132322311
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.057999998331069946
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_4_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_4_0_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_5_B"
colliders {
position {
y: 0.06830000132322311
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.057999998331069946
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_5_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_5_0_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_6_B"
colliders {
position {
y: 0.06830000132322311
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.057999998331069946
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_6_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_6_0_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_7_B"
colliders {
position {
y: 0.06830000132322311
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.057999998331069946
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_7_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
bodies {
name: "octopus_0_7_0_0_B"
colliders {
position {
y: 0.03700000047683716
}
rotation {
x: 89.95437622070312
}
capsule {
radius: 0.02199999988079071
length: 0.11800000071525574
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen {
position {
}
rotation {
}
}
}
joints {
name: "$octopus.octopus_body"
stiffness: 7500.0
parent: "octopus"
child: "octopus_body"
rotation {
y: -90.0
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_J_R100"
stiffness: 7500.0
parent: "octopus_body"
child: "octopus_0_B"
parent_offset {
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_B"
child: "octopus_0_0_B"
parent_offset {
z: 0.014999999664723873
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_0_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_0_B"
child: "octopus_0_0_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_0_0_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_0_0_B"
child: "octopus_0_0_0_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_1_J_R100"
stiffness: 7500.0
parent: "octopus_0_B"
child: "octopus_0_1_B"
parent_offset {
z: 0.014999999664723873
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
z: 45.0
}
}
joints {
name: "octopus_0_1_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_1_B"
child: "octopus_0_1_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_1_0_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_1_0_B"
child: "octopus_0_1_0_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_2_J_R100"
stiffness: 7500.0
parent: "octopus_0_B"
child: "octopus_0_2_B"
parent_offset {
z: 0.014999999664723873
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
z: 90.0
}
}
joints {
name: "octopus_0_2_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_2_B"
child: "octopus_0_2_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_2_0_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_2_0_B"
child: "octopus_0_2_0_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_3_J_R100"
stiffness: 7500.0
parent: "octopus_0_B"
child: "octopus_0_3_B"
parent_offset {
z: 0.014999999664723873
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
z: 135.0
}
}
joints {
name: "octopus_0_3_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_3_B"
child: "octopus_0_3_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_3_0_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_3_0_B"
child: "octopus_0_3_0_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_4_J_R100"
stiffness: 7500.0
parent: "octopus_0_B"
child: "octopus_0_4_B"
parent_offset {
z: 0.014999999664723873
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
z: 180.0
}
}
joints {
name: "octopus_0_4_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_4_B"
child: "octopus_0_4_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_4_0_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_4_0_B"
child: "octopus_0_4_0_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_5_J_R100"
stiffness: 7500.0
parent: "octopus_0_B"
child: "octopus_0_5_B"
parent_offset {
z: 0.014999999664723873
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
z: -135.0
}
}
joints {
name: "octopus_0_5_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_5_B"
child: "octopus_0_5_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_5_0_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_5_0_B"
child: "octopus_0_5_0_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_6_J_R100"
stiffness: 7500.0
parent: "octopus_0_B"
child: "octopus_0_6_B"
parent_offset {
z: 0.014999999664723873
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
z: -90.0
}
}
joints {
name: "octopus_0_6_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_6_B"
child: "octopus_0_6_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_6_0_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_6_0_B"
child: "octopus_0_6_0_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_7_J_R100"
stiffness: 7500.0
parent: "octopus_0_B"
child: "octopus_0_7_B"
parent_offset {
z: 0.014999999664723873
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
z: -45.0
}
}
joints {
name: "octopus_0_7_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_7_B"
child: "octopus_0_7_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
joints {
name: "octopus_0_7_0_0_J_R100"
stiffness: 7500.0
parent: "octopus_0_7_0_B"
child: "octopus_0_7_0_0_B"
parent_offset {
y: 0.07400000095367432
}
child_offset {
}
rotation {
}
angular_damping: 10.0
angle_limit {
min: -45.0
max: 45.0
}
limit_strength: 400.0
spring_damping: 50.0
reference_rotation {
}
}
actuators {
name: "$octopus.octopus_body"
joint: "$octopus.octopus_body"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_J_R100"
joint: "octopus_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_0_J_R100"
joint: "octopus_0_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_0_0_J_R100"
joint: "octopus_0_0_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_0_0_0_J_R100"
joint: "octopus_0_0_0_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_1_J_R100"
joint: "octopus_0_1_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_1_0_J_R100"
joint: "octopus_0_1_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_1_0_0_J_R100"
joint: "octopus_0_1_0_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_2_J_R100"
joint: "octopus_0_2_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_2_0_J_R100"
joint: "octopus_0_2_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_2_0_0_J_R100"
joint: "octopus_0_2_0_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_3_J_R100"
joint: "octopus_0_3_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_3_0_J_R100"
joint: "octopus_0_3_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_3_0_0_J_R100"
joint: "octopus_0_3_0_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_4_J_R100"
joint: "octopus_0_4_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_4_0_J_R100"
joint: "octopus_0_4_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_4_0_0_J_R100"
joint: "octopus_0_4_0_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_5_J_R100"
joint: "octopus_0_5_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_5_0_J_R100"
joint: "octopus_0_5_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_5_0_0_J_R100"
joint: "octopus_0_5_0_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_6_J_R100"
joint: "octopus_0_6_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_6_0_J_R100"
joint: "octopus_0_6_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_6_0_0_J_R100"
joint: "octopus_0_6_0_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_7_J_R100"
joint: "octopus_0_7_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_7_0_J_R100"
joint: "octopus_0_7_0_J_R100"
strength: 150.0
torque {
}
}
actuators {
name: "octopus_0_7_0_0_J_R100"
joint: "octopus_0_7_0_0_J_R100"
strength: 150.0
torque {
}
}
"""
COLLIDES = ('octopus', 'octopus_body', 'octopus_0_B')
# Accumulate the collidable bodies of all eight arms (three segments each).
for i in range(8):
  COLLIDES += (
      f'octopus_0_{i}_B',
      f'octopus_0_{i}_0_B',
      f'octopus_0_{i}_0_0_B',
  )
DEFAULT_OBSERVERS = ('root_z_joints',)
def get_specs():
return dict(
message_str=SYSTEM_CONFIG,
collides=COLLIDES,
root=ROOT,
term_fn=upright_term_fn,
observers=DEFAULT_OBSERVERS)
|
google/brax
|
brax/experimental/composer/components/octopus.py
|
Python
|
apache-2.0
| 22,585
|
[
"Octopus"
] |
9f04e7f4d751695528bfd373def912f1e9eea1078acb6b3a2bc09f802365e7c4
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
import os
from unittest import TestCase
import warnings
from django.utils import html, safestring
from django.utils._os import upath
from django.utils.encoding import force_text
class TestUtilsHtml(TestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
('&', '&'),
('<', '<'),
('>', '>'),
('"', '"'),
("'", '''),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
        self.check_output(f, '<&', '&lt;&amp;')
def test_format_html(self):
self.assertEqual(
html.format_html("{} {} {third} {fourth}",
"< Dangerous >",
html.mark_safe("<b>safe</b>"),
third="< dangerous again",
fourth=html.mark_safe("<i>safe again</i>")
),
"< Dangerous > <b>safe</b> < dangerous again <i>safe again</i>"
)
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
            ('<p>See: &#39;&eacute; is an apostrophe followed by e acute</p>',
             'See: &#39;&eacute; is an apostrophe followed by e acute'),
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('hi, <f x', 'hi, <f x'),
('234<235, right?', '234<235, right?'),
('a4<a5 right?', 'a4<a5 right?'),
('b7>b2!', 'b7>b2!'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
('a<p a >b</p>c', 'abc'),
('d<a:b c:d>e</p>f', 'def'),
('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
)
for value, output in items:
self.check_output(f, value, output)
# Some convoluted syntax for which parsing may differ between python versions
output = html.strip_tags('<sc<!-- -->ript>test<<!-- -->/script>')
self.assertNotIn('<script>', output)
self.assertIn('test', output)
output = html.strip_tags('<script>alert()</script>&h')
self.assertNotIn('<script>', output)
self.assertIn('alert()', output)
# Test with more lengthy content (also catching performance regressions)
for filename in ('strip_tags1.html', 'strip_tags2.txt'):
path = os.path.join(os.path.dirname(upath(__file__)), 'files', filename)
with open(path, 'r') as fp:
content = force_text(fp.read())
start = datetime.now()
stripped = html.strip_tags(content)
elapsed = datetime.now() - start
self.assertEqual(elapsed.seconds, 0)
self.assertIn("Please try again.", stripped)
self.assertNotIn('<', stripped)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
entities = ("", "", "&a;", "&fdasdfasdfasdf;")
patterns = (
("asdf %(entity)s ", "asdf "),
("%(entity)s%(entity)s", ""),
("&%(entity)s%(entity)s", "&"),
("%(entity)s3", "3"),
)
for entity in entities:
for in_pattern, output in patterns:
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_escapejs(self):
f = html.escapejs
items = (
('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(r'\ : backslashes, too', '\\u005C : backslashes, too'),
('and lots of whitespace: \r\n\t\v\f\b', 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
(r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
('paragraph separator:\u2029and line separator:\u2028', 'paragraph separator:\\u2029and line separator:\\u2028'),
)
for value, output in items:
self.check_output(f, value, output)
def test_remove_tags(self):
f = html.remove_tags
items = (
("<b><i>Yes</i></b>", "b i", "Yes"),
("<a>x</a> <p><b>y</b></p>", "a b", "x <p>y</p>"),
)
for value, tags, output in items:
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertEqual(f(value, tags), output)
def test_smart_urlquote(self):
quote = html.smart_urlquote
# Ensure that IDNs are properly quoted
self.assertEqual(quote('http://öäü.com/'), 'http://xn--4ca9at.com/')
self.assertEqual(quote('http://öäü.com/öäü/'), 'http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/')
# Ensure that everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered safe as per RFC
self.assertEqual(quote('http://example.com/path/öäü/'), 'http://example.com/path/%C3%B6%C3%A4%C3%BC/')
self.assertEqual(quote('http://example.com/%C3%B6/ä/'), 'http://example.com/%C3%B6/%C3%A4/')
self.assertEqual(quote('http://example.com/?x=1&y=2+3&z='), 'http://example.com/?x=1&y=2+3&z=')
self.assertEqual(quote('http://example.com/?x=<>"\''), 'http://example.com/?x=%3C%3E%22%27')
self.assertEqual(quote('http://example.com/?q=http://example.com/?x=1%26q=django'),
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango')
self.assertEqual(quote('http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango'),
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango')
def test_conditional_escape(self):
s = '<h1>interop</h1>'
        self.assertEqual(html.conditional_escape(s),
                         '&lt;h1&gt;interop&lt;/h1&gt;')
self.assertEqual(html.conditional_escape(safestring.mark_safe(s)), s)
|
gdi2290/django
|
tests/utils_tests/test_html.py
|
Python
|
bsd-3-clause
| 8,304
|
[
"ADF"
] |
3115f31e6a040c806f32873cc1d7f1bf3b2e73961848f27e53be85ec7130cf22
|
from pysimm import system, lmps, forcefield
def run(test=False):
# create empty system
print('Example progress: Creating an empty system...')
s = system.System()
# create new molecule in our system
print('Example progress: Adding an empty molecule container to our system...')
m = s.molecules.add(system.Molecule())
# retrieve Dreiding parameters
print('Example progress: Retrieving Dreiding force field parameters...')
f = forcefield.Dreiding()
s.forcefield = f.name
# get a copy of the C_ particle type object from Dreiding
# get method returns a list, we need the first element
dreiding_C_ = s.particle_types.add(f.particle_types.get('C_3')[0].copy())
# get H_ particle type object from Dreiding
dreiding_H_ = s.particle_types.add(f.particle_types.get('H_')[0].copy())
# we'll first make the carbon atom at the origin
# we'll include gasteiger charges later
print('Example progress: Adding carbon atom at origin...')
c1 = s.particles.add(system.Particle(type=dreiding_C_, x=0, y=0, z=0, charge=0, molecule=m))
# now we'll add 4 hydrogen atoms bonded to our carbon atom
# these atoms will be placed randomly 1.5 angstroms from the carbon atom
    # we'll optimize the structure using LAMMPS afterwards
# we supply the Dreiding forcefield object so that bond and angle types can be added as well
print('Example progress: Adding 4 hydrogen atoms at random positions bonded to the carbon atom...')
h1 = s.add_particle_bonded_to(system.Particle(type=dreiding_H_, charge=0, molecule=m), c1, f)
h2 = s.add_particle_bonded_to(system.Particle(type=dreiding_H_, charge=0, molecule=m), c1, f)
h3 = s.add_particle_bonded_to(system.Particle(type=dreiding_H_, charge=0, molecule=m), c1, f)
h4 = s.add_particle_bonded_to(system.Particle(type=dreiding_H_, charge=0, molecule=m), c1, f)
# let's add gasteiger charges
print('Example progress: Deriving Gasteiger charges...')
s.apply_charges(f, charges='gasteiger')
# right now there is no simulation box defined
# we'll define a box surrounding our methane molecule with a 10 angstrom padding
print('Example progress: Constructing Simulation box surrounding our new molecule...')
s.set_box(padding=10)
# before we optimize our structure, LAMMPS needs to know what type of
# pair, bond, and angle interactions we are using
# these are determined by the forcefield being used
s.pair_style='lj/cut'
s.bond_style='harmonic'
s.angle_style='harmonic'
# we'll perform energy minimization using the fire algorithm in LAMMPS
print('Example progress: Optimizing structure using LAMMPS...')
lmps.quick_min(s, min_style='fire', name='fire_min', etol=1e-10, ftol=1e-10)
# write xyz, YAML, LAMMPS data, and chemdoodle json files
print('Example progress: Saving structure to files...')
s.write_xyz('methane.xyz')
s.write_yaml('methane.yaml')
s.write_lammps('methane.lmps')
s.write_chemdoodle_json('methane.json')
print('Example progress: Complete!')
if __name__ == '__main__':
run()
|
polysimtools/pysimm
|
Examples/01_methane/dreiding/create.py
|
Python
|
mit
| 3,174
|
[
"LAMMPS"
] |
227af7e8be195e258d8e3df2acba5a842880433a03192905828925e06290f955
|
#
# Copyright (C) 2011 by Brian Weck
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
#
import sys, os.path, time, re
import urllib, urllib2, cookielib
import simplejson, HTMLParser
from urlparse import urlparse
from HTTPHeaderLogger import HTTPHeaderLoggerHandler
from . import log, config
class APIWrapper:
""" APIWrapper for reddit """
#
last_request_time = None
#
def __init__(self):
self.log = log.getLogger("cssbot.reddit.APIWrapper")
#
self.throttle = config.getFloat("reddit", "throttle", 2.5)
self.num_retries = config.getInt("reddit", "num_retries", 3)
self.retry_delay_sec = config.getFloat("reddit", "retry_delay_sec", 15)
self.user = config.get("reddit", "user")
self.password = config.get("reddit", "password")
#
self.cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj), HTTPHeaderLoggerHandler)
urllib2.install_opener(self.opener)
# https://github.com/reddit/reddit/wiki/API%3A-login
def login(self):
#
uri = "http://www.reddit.com/api/login/%s" % self.user
params = urllib.urlencode(dict(api_type="json", user=self.user, passwd=self.password))
j = self.make_request_json(uri, params)
#FIXME: throw errors?
errors = j["json"]["errors"]
if errors:
self.log.error( "Login failed: [%s] %s", errors[0][0], errors[0][1])
sys.exit(1)
self.modhash = j["json"]["data"]["modhash"]
self.log.debug( "modhash = %s", self.modhash )
self.cookie = j["json"]["data"]["cookie"]
self.log.debug( "cookie = %s", self.cookie )
self.log.debug( "cookiejar = %s", self.cj )
return True
# logout.
def logout(self):
self.cj.clear()
return True
#
#
def make_request_json(self, uri, params=None):
#
parts = urlparse(uri)
# scheme://netloc/path;parameters?query#fragment
_uri = parts.scheme + "://" + parts.netloc + parts.path
if not parts.path.endswith(".json"):
_uri += ".json"
if parts.params:
_uri += ";" + parts.params
if parts.query:
_uri += "?" + parts.query
if parts.fragment:
_uri += "#" + parts.fragment
#
content = self.make_request(_uri, params)
try:
return simplejson.loads(content)
except simplejson.decoder.JSONDecodeError, jde:
self.log.error("could not parse json response, stopping. content=\n%s", content)
sys.exit(1)
#
#
def make_request(self, uri, params=None):
#
now = time.time()
if self.last_request_time is not None:
if now < (self.last_request_time + self.throttle):
duration = (self.last_request_time + self.throttle) - now
self.log.debug("delaying %s seconds until next request", duration)
time.sleep(duration)
#
attempts = 1
while 1:
try:
self.log.debug("open uri: %s", uri)
req = urllib2.Request(uri, params)
handle = urllib2.urlopen(req)
data = handle.read()
self.last_request_time = now
return data
except IOError, e:
self.log.warn("failed to open uri: %s", uri)
if hasattr(e, "code"):
self.log.warn("We failed with error code - %s.", e.code)
#
if attempts > self.num_retries:
self.log.error("attempt to open uri %s failed %d times, exiting.", uri, attempts)
# alternatively, re-throw the error to catch at a higher level.
sys.exit(1)
#
attempts += 1
self.log.warn("retrying %s in %d sec", uri, self.retry_delay_sec)
time.sleep(self.retry_delay_sec)
# finally,
return None
def get_stylesheet(self, sub):
contents = self.make_request("http://www.reddit.com/r/%s/about/stylesheet" % sub)
if not contents:
self.log.error("could not get a current copy of the css, exiting")
sys.exit(1)
p = re.compile('<textarea rows="20" cols="20" id="stylesheet_contents" name="stylesheet_contents" >(.*?)</textarea>')
m = p.search(contents)
css = m.group(1)
h = HTMLParser.HTMLParser()
return h.unescape(css)
def save_stylesheet(self, sub, css):
d = dict( id="#subreddit_stylesheet",
op="save",
r=sub,
renderstyle="html",
stylesheet_contents=css,
uh=self.modhash )
params = urllib.urlencode(d)
return self.make_request("http://www.reddit.com/api/subreddit_stylesheet", params)
def get_comments(self, thing_id):
uri = "http://www.reddit.com/comments/%s" % thing_id
return self.make_request_json(uri)
def get_r_new(self, subreddit):
uri = "http://www.reddit.com/r/%s/new/?sort=new" % (subreddit)
return self.make_request_json(uri)
def get_r_new_before(self, subreddit, t3):
uri = "http://www.reddit.com/r/%s/new/?sort=new&before=%s" % (subreddit, t3)
return self.make_request_json(uri)
def get_r_new_after(self, subreddit, t3):
uri = "http://www.reddit.com/r/%s/new/?sort=new&after=%s" % (subreddit, t3)
return self.make_request_json(uri)
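# Hedged usage sketch (not part of the original module); assumes the cssbot
# config file provides the [reddit] user/password keys read in __init__, and
# "css" is a placeholder subreddit name:
# api = APIWrapper()
# api.login()
# listing = api.get_r_new("css")
# for child in listing["data"]["children"]:
#     print child["data"]["title"]
# api.logout()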
|
bweck/cssbot
|
cssbot/reddit.py
|
Python
|
mit
| 5,380
|
[
"Brian"
] |
9184d9a7269665a14190424e34d8e81028ae0fee73dd76d28a7a9a52208fc3c3
|
""" This is a test of the AuthDB. Requires authlib, pyjwt
It supposes that the DB is present and installed in DIRAC
"""
import time
import DIRAC
DIRAC.initialize() # Initialize configuration
payload = {
"sub": "user",
"iss": "issuer",
"iat": int(time.time()),
"exp": int(time.time()) + (12 * 3600),
"scope": "scope",
"setup": "setup",
"group": "my_group",
}
exp_payload = payload.copy()
exp_payload["iat"] = int(time.time()) - 10
exp_payload["exp"] = int(time.time()) - 10
from authlib.jose import jwt
from DIRAC.FrameworkSystem.DB.TokenDB import TokenDB
db = TokenDB()
def test_Token():
"""Try to revoke/save/get tokens"""
DToken = dict(
access_token=jwt.encode({"alg": "HS256"}, payload, "secret").decode("utf-8"),
refresh_token=jwt.encode({"alg": "HS256"}, payload, "secret").decode("utf-8"),
expires_at=int(time.time()) + 3600,
)
New_DToken = dict(
access_token=jwt.encode({"alg": "HS256"}, payload, "secret").decode("utf-8"),
refresh_token=jwt.encode({"alg": "HS256"}, payload, "secret").decode("utf-8"),
issued_at=int(time.time()),
expires_in=int(time.time()) + 3600,
)
Exp_DToken = dict(
access_token=jwt.encode({"alg": "HS256"}, exp_payload, "secret").decode("utf-8"),
refresh_token=jwt.encode({"alg": "HS256"}, exp_payload, "secret").decode("utf-8"),
expires_at=int(time.time()) - 10,
rt_expires_at=int(time.time()) - 10,
)
# Remove all tokens
result = db.removeToken(user_id=123)
assert result["OK"], result["Message"]
# Store tokens
result = db.updateToken(DToken.copy(), userID=123, provider="DIRAC", rt_expired_in=24)
assert result["OK"], result["Message"]
assert result["Value"] == []
# Expired token
result = db.updateToken(Exp_DToken.copy(), userID=123, provider="DIRAC", rt_expired_in=24)
assert not result["OK"]
# Check token
result = db.getTokenForUserProvider(userID=123, provider="DIRAC")
assert result["OK"], result["Message"]
assert result["Value"]["access_token"] == DToken["access_token"]
assert result["Value"]["refresh_token"] == DToken["refresh_token"]
# Store new tokens
result = db.updateToken(New_DToken.copy(), userID=123, provider="DIRAC", rt_expired_in=24)
assert result["OK"], result["Message"]
# Must return old tokens
assert len(result["Value"]) == 1
assert result["Value"][0]["access_token"] == DToken["access_token"]
assert result["Value"][0]["refresh_token"] == DToken["refresh_token"]
# Check token
result = db.getTokenForUserProvider(userID=123, provider="DIRAC")
assert result["OK"], result["Message"]
assert result["Value"]["access_token"] == New_DToken["access_token"]
assert result["Value"]["refresh_token"] == New_DToken["refresh_token"]
|
DIRACGrid/DIRAC
|
tests/Integration/Framework/Test_TokenDB.py
|
Python
|
gpl-3.0
| 2,859
|
[
"DIRAC"
] |
df394d7e8d1dc3c58a39dbd3f168a157f9ff67e2bd1041c6fd95fc282ccac231
|
"""
DIRAC.RequestManagementSystem.Agent package
"""
|
DIRACGrid/DIRAC
|
src/DIRAC/RequestManagementSystem/Agent/__init__.py
|
Python
|
gpl-3.0
| 55
|
[
"DIRAC"
] |
86bfae30968bdeff0caf1d4c9bec4f3a923bc490b35a6a11b76d661d02e31897
|
#
# _____ _____ _______ __ _ _______ ______ _______ _____
# | | |_____] |______ | \ | | |_____/ |_____| | |
# |_____| | |______ | \_| | | \_ | | __|__ |_____
#
# _______ _____ __ _ _ _ _______ ______ _______ _____ _____ __ _
# | | | | \ | \ / |______ |_____/ |______ | | | | \ |
# |_____ |_____| | \_| \/ |______ | \_ ______| __|__ |_____| | \_|
#
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#pyshp
import shapefile
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#pyproj
import pyproj
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#requests
import requests
import hashlib, collections, csv, os, sys, zipfile
import json
# http://www.codeforamerica.org/specifications/trails/spec.html
TRAILS_URL = 'http://library.oregonmetro.gov/rlisdiscovery/trails.zip'
WGS84 = pyproj.Proj("+init=EPSG:4326") # LatLon with WGS84 datum used for geojson
ORSP = pyproj.Proj("+init=EPSG:2913", preserve_units=True) # datum used by Oregon Metro
STEWARDS = []
ORCA_SITES = {}
if not os.path.exists(os.getcwd()+'/output'):
"""
Create a directory to hold the output
"""
os.makedirs(os.getcwd()+'/output')
def get_duplicates(arr):
"""
helper function to check for duplicate ids
"""
dup_arr = arr[:]
for i in set(arr):
dup_arr.remove(i)
return list(set(dup_arr))
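# e.g. get_duplicates([1, 2, 2, 3, 3, 3]) -> [2, 3] (order not guaranteed)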
def download(path, file):
if not os.path.exists(os.getcwd()+'/src'):
os.makedirs(os.getcwd()+'/src')
with open(os.getcwd()+'/src/'+file+'.zip', 'wb') as handle:
response = requests.get(path, stream=True)
if not response.ok:
# Something went wrong
print "Failed to download "+file
sys.exit()
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
print 'Downloaded '+file
unzip(file)
print 'Unzipped '+file
def unzip(file):
zfile = zipfile.ZipFile(os.getcwd()+'/src/'+file+'.zip')
for name in zfile.namelist():
(dirname, filename) = os.path.split(name)
zfile.extract(name, os.getcwd()+'/src/')
zfile.close()
def get_steward_id(steward):
try:
id = [x['steward_id'] for x in STEWARDS if x["name"] == steward][0]
return id
except IndexError as e:
#Crap stewards
if steward=='Home Owner Association': return 9999 #private
if steward=='North Clackamas Parks and Recreation Department': return 58672 #should be district
if steward=='United States Fish & Wildlife' : return 43262
if steward=='Wood Village Parks & Recreation' : return 8348
if steward is None: return 9999 #private
return 9999
def compare_segment_arrays(a, b):
if len(a) != len(b): return False
for n in a:
if n in b:
continue
else:
return False
return True
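# e.g. compare_segment_arrays([1, 2, 3], [3, 2, 1]) -> True; the check is
# membership-based, so duplicate counts are ignored when lengths match.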
def is_subset(a,b):
foo=True
for val in a:
if val in b:
continue
else:
#print val
foo= False
return foo
def process_trail_segments():
trail_segments = []
named_trails = []
# read the trails shapefile
reader = shapefile.Reader(os.getcwd()+'/src/trails.shp')
fields = reader.fields[1:]
field_names = [field[0] for field in fields]
#iterate trails
for sr in reader.shapeRecords():
atr = dict(zip(field_names, sr.record))
# we're only allowing open existing trails to pass
if atr['STATUS'].upper() == 'OPEN' and atr['SYSTEMTYPE'].upper() != 'OTHER' and atr['TRLSURFACE'] != 'Water':
props = collections.OrderedDict()
#effectively join to the stewards table
id = props['id'] = str(int(atr['TRAILID']))
props['steward_id'] = get_steward_id(atr['AGENCYNAME'])
props['motor_vehicles'] = 'no'
props['foot'] = 'yes' if atr['HIKE'] == 'Yes' else 'no'
props['bicycle'] = 'yes' if atr['ROADBIKE'] == 'Yes'\
or atr['MTNBIKE'] == 'Yes' else 'no'
props['horse'] = 'yes' if atr['EQUESTRIAN'] == 'Yes' else 'no'
props['ski'] = 'no'
# spec: "yes", "no", "permissive", "designated"
props['wheelchair'] = 'yes' if atr['ACCESSIBLE'] == 'Accessible' else 'no'
props['osm_tags'] = 'surface='+atr['TRLSURFACE']+';width='+atr['WIDTH']
# Assumes single part geometry == our (RLIS) trails.shp
n_geom = []
geom = sr.shape.__geo_interface__
if geom['type'] !='LineString':
print 'Encountered multipart...skipping'
continue
for point in geom['coordinates']:
n_geom.append(pyproj.transform(ORSP, WGS84, point[0], point[1]))
segment= collections.OrderedDict()
segment['type']='Feature'
segment['properties'] = props
segment['geometry'] = {"type":"LineString", "coordinates":n_geom}
trail_segments.append(segment)
if atr['TRAILNAME'] != None and ' ' not in atr['TRAILNAME']:
if len([x for x in named_trails if x["atomic_name"]==atr['TRAILNAME']+'|'+atr['COUNTY']])==0:
named_trails.append({'atomic_name': atr['TRAILNAME']+'|'+atr['COUNTY'], 'name':atr['TRAILNAME'],'segment_ids':[atr['TRAILID']]})
else:
[x for x in named_trails if x["atomic_name"]==atr['TRAILNAME']+'|'+atr['COUNTY']][0]['segment_ids'].append(atr['TRAILID'])
if atr['SYSTEMNAME'] != None and ' ' not in atr['SYSTEMNAME']:
if len([x for x in named_trails if x['atomic_name']==atr['SYSTEMNAME']])==0:
named_trails.append({'atomic_name': atr['SYSTEMNAME'], 'name':atr['SYSTEMNAME'],'segment_ids':[atr['TRAILID']]})
else:
[x for x in named_trails if x["atomic_name"]==atr['SYSTEMNAME']][0]['segment_ids'].append(atr['TRAILID'])
if atr['SHAREDNAME'] != None and ' ' not in atr['SHAREDNAME']:
if len([x for x in named_trails if x['atomic_name']==atr['SHAREDNAME']])==0:
named_trails.append({'atomic_name': atr['SHAREDNAME'], 'name':atr['SHAREDNAME'],'segment_ids':[atr['TRAILID']]})
else:
[x for x in named_trails if x["atomic_name"]==atr['SHAREDNAME']][0]['segment_ids'].append(atr['TRAILID'])
#Release the trails shapefile
reader = None
#step 1
#remove duplicate geometries in named_trails
all_arrays = []
for trail in named_trails: all_arrays.append(trail['segment_ids'])
#identify duplicate geometries
duplicates = [x for x in named_trails if len([y for y in all_arrays if compare_segment_arrays(x['segment_ids'],y)])>1]
glob_segs = None
counter = 0
for dup in duplicates:
if glob_segs is None or not compare_segment_arrays(dup['segment_ids'],glob_segs):
#find ur buddy
d = [x for x in duplicates if compare_segment_arrays(x['segment_ids'],dup['segment_ids'])]
glob_segs = dup['segment_ids']
to_remove = [x for x in d if '|' in x['atomic_name']]
if len(to_remove) == 1:
named_trails.remove(to_remove[0])
else:
print 'no piped atomic name... I dunno'
#step 2 - remove atomically stored trails (with county) that are pure
# subsets of a regional trail superset
glob_name = None
for trail in named_trails:
if glob_name is None or trail['name'] != glob_name:
dups = [x for x in named_trails if x['name']==trail['name']]
glob_name = trail['name']
#determine the dup with the most segs *heinous*
superset = max(enumerate(dups), key = lambda tup: len(tup[1]['segment_ids']))
superitem = [x for x in dups if x==superset[1]][0]
for dup in dups:
if len(dup['segment_ids']) != len(superitem['segment_ids']):
foo =is_subset(dup['segment_ids'], superitem['segment_ids'])
if foo and '|' in dup['atomic_name']:
#print 'Removed '+dup['atomic_name'] + ' from named_trails'
named_trails.remove(dup)
glob_name = trail['name']
#step 3 - remove atomically stored trails (with county) that are
# *impure* subsets of a regional trail superset
#this sucks
#So let's look for where the name matches the atomic name of an existing
#named trail - the assumption being that the atomic name of a regional
#trail will not include the pipe '|' and county
to_delete=[]
for trail in named_trails:
if '|' in trail['atomic_name']:
for test_trail in named_trails:
if trail['name'] == test_trail['atomic_name']:
#print trail['name'] + ' combined with regional trail'
#Insert whatever segments in trail that aren't in
#test_trail
for segment in trail['segment_ids']:
if segment not in test_trail['segment_ids']:
test_trail['segment_ids'].append(segment)
#append to to_delete
to_delete.append(trail)
#delete
for trail in to_delete:
named_trails.remove(trail)
#step 4 - assign named trail id from reference table
for trail in named_trails:
if '|' in trail['atomic_name']:
county = trail['atomic_name'].split('|')[1].strip()
name = trail['atomic_name'].split('|')[0].strip()
else: #don't need the county == blank
name = trail['atomic_name']
county = ''
id= [x for x in NAMED_TRAIL_IDS if x[1]==county and x[2]==name]
if len(id)==0:
print '*' +name+' || '+ county # no id in named_trails
else:
[x for x in named_trails if x['atomic_name']==trail['atomic_name']][0]['named_trail_id'] = id[0]
#step 5 - remove atomic name
for n in named_trails:
n.pop('atomic_name')
print ("Completed trails")
return trail_segments, named_trails
def process_areas():
# read the parks shapefile
reader = shapefile.Reader(os.getcwd()+'/src/orca_sites.shp') #this is actually ORCA_sites_beta
fields = reader.fields[1:]
field_names = [field[0] for field in fields]
areas = []
counter = 0
for sr in reader.shapeRecords():
if counter == 1000: break #Take the first 1,000 features, ORCA is a supermassive YKW
atr = dict(zip(field_names, sr.record))
# if atr['STATUS'] == 'Closed': #We don't want any closed sites to show up.
# continue
"""
SELECT *
FROM orca
WHERE county IN ( 'Clackamas', 'Multnomah', 'Washington' )
AND ( ( ownlev1 IN ( 'Private', 'Non-Profits' )
AND ( unittype IN ( 'Natural Area', 'Other' )
AND recreation = 'Yes' )
OR conservation = 'High' )
OR ( ownlev1 NOT IN ( 'Private', 'Non-Profits' )
AND ( unittype = 'Other'
AND ( recreation = 'Yes'
OR conservation IN ( 'High', 'Medium' ) )
OR unittype = 'Natural Area' ) )
OR ( ownlev2 = 'Non-profit Conservation' )
OR ( unittype = 'Park' ) )
"""
# if atr['COUNTY'] in ['Clackamas', 'Multnomah', 'Washington'] and ((atr['OWNLEV1'] in ['Private', 'Non-Profits'] and (atr['UNITTYPE'] in ['Natural Area', 'Other'] and atr['RECREATION']=='Yes') or atr['CONSERVATI']=='High') or (atr['OWNLEV1'] not in ['Private', 'Non-Profits'] and (atr['UNITTYPE']== 'Other' and (atr['RECREATION']=='Yes' or atr['CONSERVATI'] in ['High', 'Medium']) or atr['UNITTYPE'] == 'Natural Area') ) or atr['OWNLEV2'] == 'Non-profit Conservation' or atr['UNITTYPE']== 'Park'):
if 1:
props = collections.OrderedDict()
# if atr['MANAGER'] not in stewards.iterkeys():
# m = hashlib.sha224(atr['MANAGER']).hexdigest()
# agency_id = str(int(m[-6:], 16))
# stewards[atr['MANAGER']] = agency_id
geom = sr.shape.__geo_interface__
if geom['type'] == 'MultiPolygon':
polys=[]
for poly in geom['coordinates']:
rings = []
for ring in poly:
n_geom = []
for point in ring:
n_geom.append(pyproj.transform(ORSP, WGS84, point[0], point[1]))
rings.append(n_geom)
polys.append(rings)
new_geom = {"type":"MultiPolygon", "coordinates":polys}
else:
rings = []
for ring in geom['coordinates']:
n_geom = []
for point in ring:
n_geom.append(pyproj.transform(ORSP, WGS84, point[0], point[1]))
rings.append(n_geom)
new_geom = {"type":"Polygon", "coordinates":rings}
props['name'] = atr['SITENAME']
props['id'] = int(atr['DISSOLVEID'])
if props['id'] in ORCA_SITES:
props['steward_id'] = ORCA_SITES[props['id']]
else:
props['steward_id'] = 5127
props['url'] = ''
props['osm_tags'] = ''
_area= collections.OrderedDict()
_area['type']='Feature'
_area['properties'] = props
_area['geometry'] = new_geom
areas.append(_area)
counter +=1
# free up the shp file.
reader = None
return areas
if __name__ == "__main__":
#####################################################
# Download data from RLIS
#
# download(TRAILS_URL, 'trails')
#download(ORCA_URL, 'orca')
#
#####################################################
#####################################################
# Load Stewards into Python object
#
with open(os.getcwd() + "/output/stewards.csv", mode='r') as infile:
reader = csv.DictReader(infile, ['steward_id', 'name', 'url', 'phone', 'address','publisher', 'license']) #stewards.csv header
reader.next()
for row in reader:
STEWARDS.append(row)
for row in STEWARDS:
row['steward_id'] = int(row['steward_id'])
print "sucked up stewards"
#
#
#####################################################
#####################################################
# Load Named Trails into Python object
#
with open(os.getcwd() + "/ref/named_trails_lookup.csv", mode='r') as infile:
reader = csv.reader(infile)
reader.next() #skip header line
NAMED_TRAIL_IDS = list(reader)
for row in NAMED_TRAIL_IDS:
row[0] = int(row[0])
print "Sucked up Named trail ids"
#####################################################
# Load Named Trails into Python object
#
with open(os.getcwd() + "/ref/orca_sites_to_steward.csv", mode='r') as infile:
reader = csv.reader(infile)
reader.next() #skip header line
for row in reader:
# print row
ORCA_SITES[int(row[0])] = int(row[1])
print "Sucked up orca sites"
#
#
#####################################################
#####################################################
# Load objects and arrays with calls to core functions
#
trail_segments, named_trails = process_trail_segments()
######################################################
# write named_trails.csv
#
named_trails_out = open(os.getcwd() + "/output/named_trails.csv", "w")
named_trails_out.write('"name","segment_ids","id","description","part_of"\n')
for named_trail in named_trails:
try: #horrible hack for trails that are in the current (2014 Q4) Trails download in RLIS
#discovery that are not in named_trails.csv because they were removed or whatever...
named_trails_out.write(named_trail['name']+","+ ";".join(str(int(x)) for x in named_trail['segment_ids'])+","+ str(named_trail['named_trail_id'][0]) + ",,\n")
except KeyError:
pass
named_trails_out.close()
print 'Created named_trails.csv'
#
########################################################
########################################################
# write trail_segments.geojson
#
trail_segments_out = open(os.getcwd() + "/output/trail_segments.geojson", "w")
trail_segments_out.write(json.dumps({"type": "FeatureCollection",\
"features": trail_segments}, indent=2) + "\n")
trail_segments_out.close()
print 'Created trail_segments.geojson'
#
########################################################
# sys.exit(1)
areas= process_areas()
########################################################
# write areas.geojson
#
areas_out = open(os.getcwd()+"/output/areas.geojson", "w")
areas_out.write(json.dumps({"type": "FeatureCollection",\
"features": areas}, indent=2, encoding="Latin1") + "\n")
areas_out.close()
print 'Created areas.geojson'
#
########################################################
print 'Process complete'
|
sainsb/RLIS_Trails_to_OT
|
RLISTrails2OT.py
|
Python
|
mit
| 17,249
|
[
"ORCA"
] |
2555083abbb01b36d356ff13a66ed4d6b38030c0e3162b057de7a3bd18a8cfb3
|
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.plugins.lookup import LookupBase
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
validate_certs = kwargs.get('validate_certs', True)
split_lines = kwargs.get('split_lines', True)
use_proxy = kwargs.get('use_proxy', True)
ret = []
for term in terms:
display.vvvv("url lookup connecting to %s" % term)
try:
response = open_url(term, validate_certs=validate_certs, use_proxy=use_proxy)
except HTTPError as e:
raise AnsibleError("Received HTTP error for %s : %s" % (term, str(e)))
except URLError as e:
raise AnsibleError("Failed lookup url for %s : %s" % (term, str(e)))
except SSLValidationError as e:
raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, str(e)))
except ConnectionError as e:
raise AnsibleError("Error connecting to %s: %s" % (term, str(e)))
if split_lines:
for line in response.read().splitlines():
ret.append(to_text(line))
else:
ret.append(to_text(response.read()))
return ret
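# Hedged usage sketch (not part of the plugin): from a playbook this lookup
# is typically invoked through Jinja2 templating; the URL below is a
# placeholder, and validate_certs/split_lines map onto the kwargs read above:
#   - debug:
#       msg: "{{ lookup('url', 'https://example.com/keys.txt', validate_certs=True, split_lines=True) }}"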
|
rmfitzpatrick/ansible
|
lib/ansible/plugins/lookup/url.py
|
Python
|
gpl-3.0
| 2,419
|
[
"Brian"
] |
724052ba80a880d857a4ef66310f24adac9c63a825f9072f051d47ff8f89ba58
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import datetime
import django_tables2 as tables
import six
from django.apps import apps
from django.contrib import messages
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.template.defaultfilters import wordwrap
from django.urls import reverse
import karaage.common as util
from karaage.common import log
from karaage.common.decorators import admin_required, login_required
from karaage.people.models import Person
from .forms import (
AddPackageForm,
LicenseForm,
SoftwareCategoryForm,
SoftwareForm,
SoftwareVersionForm,
)
from .models import (
Software,
SoftwareCategory,
SoftwareLicense,
SoftwareLicenseAgreement,
SoftwareVersion,
)
from .tables import SoftwareFilter, SoftwareTable
if apps.is_installed("karaage.plugins.kgsoftware.applications"):
from karaage.plugins.kgapplications.tables import ApplicationTable
from .applications.views import new_application
from .applications.models import SoftwareApplication
def is_application_pending(person, software_license):
query = SoftwareApplication.objects.get_for_applicant(person)
query = query.filter(software_license=software_license)
if query.count() > 0:
return True
def get_applications(software_license):
applications = SoftwareApplication.objects.filter(
software_license=software_license)
applications = applications.exclude(state='C')
return applications
def get_applications_for_person(person, software_license):
applications = SoftwareApplication.objects.get_for_applicant(person)
applications = applications.filter(software_license=software_license)
return applications
def get_application_table(request, applications):
applications_table = ApplicationTable(applications)
config = tables.RequestConfig(request, paginate={"per_page": 5})
config.configure(applications_table)
return applications_table
else:
from django.http import HttpResponseBadRequest
def is_application_pending(person, software_license):
return False
def get_applications(software_license):
return []
def get_applications_for_person(person, software_license):
return []
def get_application_table(request, applications):
return None
@login_required
def new_application(request, software_license):
return HttpResponseBadRequest("<h1>Restricted Software denied.</h1>")
@login_required
def profile_software(request):
person = request.user
agreement_list = person.softwarelicenseagreement_set.all()
return render(
template_name='kgsoftware/profile_software.html',
context={'person': person, 'agreement_list': agreement_list},
request=request)
@login_required
def software_list(request):
if not util.is_admin(request):
return _software_list_non_admin(request)
queryset = Software.objects.all().select_related()
q_filter = SoftwareFilter(request.GET, queryset=queryset)
table = SoftwareTable(q_filter.qs)
tables.RequestConfig(request).configure(table)
spec = []
for name, value in six.iteritems(q_filter.form.cleaned_data):
if value is not None and value != "":
name = name.replace('_', ' ').capitalize()
spec.append((name, value))
return render(
template_name='kgsoftware/software_list.html',
context={
'table': table,
'filter': q_filter,
'spec': spec,
'title': "Software list",
},
request=request)
@login_required
def _software_list_non_admin(request):
person = request.user
query = Software.objects.filter(softwarelicense__isnull=False).distinct()
software_list = []
for software in query:
data = {'software': software}
license_agreements = SoftwareLicenseAgreement.objects.filter(
person=person, license__software=software)
if license_agreements.count() > 0:
la = license_agreements.latest()
data['accepted'] = True
data['accepted_date'] = la.date
software_license = software.get_current_license()
data['pending'] = is_application_pending(person, software_license)
software_list.append(data)
return render(
template_name='kgsoftware/add_package_list.html',
context=locals(),
request=request)
@login_required
def software_detail(request, software_id):
software = get_object_or_404(Software, pk=software_id)
software_license = software.get_current_license()
person = request.user
license_agreements = SoftwareLicenseAgreement.objects \
.filter(person=person, license=software_license)
agreement = None
if license_agreements.count() > 0:
agreement = license_agreements.latest()
# we only list applications for current software license
applications = get_applications(software_license)
application_table = get_application_table(request, applications)
open_applications = get_applications_for_person(person, software_license)
if agreement is None and software_license is not None \
and len(open_applications) == 0 and request.method == 'POST':
if software.restricted and not util.is_admin(request):
log.add(software, "New application created for %s" % request.user)
return new_application(request, software_license)
SoftwareLicenseAgreement.objects.create(
person=person,
license=software_license,
date=datetime.datetime.today(),
)
person.add_group(software.group)
log.add(
software,
"Approved join (not restricted) for %s" % request.user)
messages.success(request, "Approved access to %s." % software)
return HttpResponseRedirect(reverse('kg_profile_software'))
return render(
template_name='kgsoftware/software_detail.html',
context=locals(),
request=request)
@admin_required
def add_package(request):
if request.method == 'POST':
form = AddPackageForm(request.POST)
if form.is_valid():
software = form.save()
return HttpResponseRedirect(software.get_absolute_url())
else:
form = AddPackageForm()
return render(
template_name='kgsoftware/add_package_form.html',
context=locals(),
request=request)
@admin_required
def software_edit(request, software_id):
from karaage.common.create_update import update_object
return update_object(
request, object_id=software_id, model=Software,
form_class=SoftwareForm)
@admin_required
def software_delete(request, software_id):
from karaage.common.create_update import delete_object
return delete_object(
request, post_delete_redirect=reverse('kg_software_list'),
object_id=software_id, model=Software)
@admin_required
def software_logs(request, software_id):
obj = get_object_or_404(Software, pk=software_id)
breadcrumbs = [
("Softwares", reverse("kg_software_list")),
(six.text_type(obj), reverse("kg_software_detail", args=[obj.pk]))
]
return util.log_list(request, breadcrumbs, obj)
@admin_required
def add_comment(request, software_id):
obj = get_object_or_404(Software, pk=software_id)
breadcrumbs = [
("Softwares", reverse("kg_software_list")),
(six.text_type(obj), reverse("kg_software_detail", args=[obj.pk]))
]
return util.add_comment(request, breadcrumbs, obj)
@login_required
def license_detail(request, license_id):
l = get_object_or_404(SoftwareLicense, pk=license_id)
return render(
template_name='kgsoftware/license_detail.html',
context=locals(),
request=request)
@admin_required
def add_license(request, software_id):
software = get_object_or_404(Software, pk=software_id)
form = LicenseForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
l = form.save()
log.add(software, "license: %s added" % l)
return HttpResponseRedirect(software.get_absolute_url())
return render(
template_name='kgsoftware/license_form.html',
context=locals(),
request=request)
@admin_required
def edit_license(request, license_id):
l = get_object_or_404(SoftwareLicense, pk=license_id)
software = l.software
form = LicenseForm(request.POST or None, instance=l)
if request.method == 'POST':
if form.is_valid():
form.save()
return HttpResponseRedirect(software.get_absolute_url())
return render(
template_name='kgsoftware/license_form.html',
context=locals(),
request=request)
@admin_required
def license_delete(request, license_id):
from karaage.common.create_update import delete_object
return delete_object(
request,
post_delete_redirect=reverse('kg_software_list'),
object_id=license_id, model=SoftwareLicense)
@admin_required
def delete_version(request, version_id):
version = get_object_or_404(SoftwareVersion, pk=version_id)
if request.method == 'POST':
version.delete()
log.delete(version.software, 'Deleted version: %s' % version)
messages.success(
request, "Version '%s' was deleted succesfully" % version)
return HttpResponseRedirect(version.get_absolute_url())
return render(
template_name='kgsoftware/version_confirm_delete.html',
context=locals(),
request=request)
@admin_required
def add_version(request, software_id):
software = get_object_or_404(Software, pk=software_id)
form = SoftwareVersionForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
version = form.save()
return HttpResponseRedirect(software.get_absolute_url())
return render(
template_name='kgsoftware/version_form.html',
context=locals(),
request=request)
@admin_required
def edit_version(request, version_id):
version = get_object_or_404(SoftwareVersion, pk=version_id)
software = version.software
form = SoftwareVersionForm(request.POST or None, instance=version)
if request.method == 'POST':
if form.is_valid():
version = form.save()
return HttpResponseRedirect(software.get_absolute_url())
return render(
template_name='kgsoftware/version_form.html',
context=locals(),
request=request)
@admin_required
def category_list(request):
category_list = SoftwareCategory.objects.all()
return render(
template_name='kgsoftware/category_list.html',
context={'category_list': category_list},
request=request)
@admin_required
def category_create(request):
from karaage.common.create_update import create_object
return create_object(
request, model=SoftwareCategory,
form_class=SoftwareCategoryForm)
@admin_required
def category_edit(request, category_id):
from karaage.common.create_update import update_object
return update_object(
request, object_id=category_id, model=SoftwareCategory,
form_class=SoftwareCategoryForm)
@admin_required
def remove_member(request, software_id, person_id):
software = get_object_or_404(Software, pk=software_id)
person = get_object_or_404(Person, pk=person_id)
if request.method == 'POST':
person.remove_group(software.group)
messages.success(request, "User '%s' removed successfully" % person)
return HttpResponseRedirect(software.get_absolute_url())
return render(
template_name='kgsoftware/person_confirm_remove.html',
context=locals(),
request=request)
@login_required
def license_txt(request, software_id):
software = get_object_or_404(Software, pk=software_id)
software_license = software.get_current_license()
if software_license is None:
raise Http404('No license found for software')
return HttpResponse(
wordwrap(software_license.text, 80),
content_type="text/plain")
|
brianmay/karaage
|
karaage/plugins/kgsoftware/views.py
|
Python
|
gpl-3.0
| 13,065
|
[
"Brian"
] |
c00cde309b8138822587508f8a1e588448ee2db36e36dc56bdb6a6a8ebd7de82
|
# $Id$
#
# Copyright (C) 2007 Greg Landrum
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import sys
from rdkit import Chem
class FastSDMolSupplier(object):
""" A wrapper around an SDMolSupplier that precomputes and stores
molecular indices (via text processing) to allow quick length
calculations and random access.
NOTE that this class needs to have the entire SD data in memory,
so it's probably not particularly useful with large files.
"""
suppl=None
data=None
sanitize=True
def __init__(self,fileN=None,data=None,sanitize=True,removeHs=True):
if fileN:
data = open(fileN,'r').read()
self.sanitize=sanitize
self.removeHs=removeHs
if data:
data = data.replace('\r\n','\n')
self.init(data)
def init(self,data,recogTxt='$$$$\n'):
if not data:
raise ValueError,'no data'
# FIX: it'd be nice to not be caching data locally like this, but it's the easiest
# way to handle pickle support.
self.data=data
self.suppl = Chem.SDMolSupplier()
self.suppl.SetData(data,sanitize=self.sanitize,removeHs=self.removeHs)
self._pos = [0]
p = 0
while 1:
try:
p = data.index(recogTxt,p+1)
p+=len(recogTxt)
except ValueError:
break
else:
self._pos.append(p)
self._pos.pop(-1)
self.suppl._SetStreamIndices(self._pos)
self._idx=0
def GetItemText(self,idx):
startOfItem = self._pos[idx]
if idx+1<len(self._pos):
endOfItem = self._pos[idx+1]
else:
endOfItem = -1
return self.data[startOfItem:endOfItem]
def reset(self):
self.suppl.reset()
self._idx=0
# ----------------------------------------------------------------
# support random access and an iterator interface:
def __iter__(self):
self.suppl.reset()
return self
def next(self):
self._idx+=1
return self.suppl.next()
def __len__(self):
return len(self.suppl)
def __getitem__(self,idx):
return self.suppl[idx]
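# Hedged usage sketch (not part of the original module); 'example.sdf' is a
# placeholder file name:
# suppl = FastSDMolSupplier('example.sdf')
# print len(suppl)               # cheap: indices were precomputed in init()
# mol = suppl[2]                 # random access without re-reading the file
# print suppl.GetItemText(2)[:40]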
|
rdkit/rdkit-orig
|
rdkit/Chem/FastSDMolSupplier.py
|
Python
|
bsd-3-clause
| 2,187
|
[
"RDKit"
] |
90c9d0dc673aac279de8e6ffbba0ab920887afe48e9b10a76f94757cda0c1f96
|
# -*- coding: utf-8 -*-
# MolMod is a collection of molecular modelling tools for python.
# Copyright (C) 2007 - 2019 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of MolMod.
#
# MolMod is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# MolMod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
"""Persistance, i.e. storage on disk, for objects with numerical attributes"""
from __future__ import print_function
import numpy as np
from molmod.io.common import FileFormatError
__all__ = ["NumberState"]
class StateAttr(object):
"""Base class for NumberState attributes"""
def __init__(self, owner, name):
"""
Arguments:
| ``owner`` -- the instance to read the attribute from
| ``name`` -- the name of the attribute
"""
self.owner = owner
self.name = name
def get(self, copy=False):
"""Return the value of the attribute"""
raise NotImplementedError
def get_kind(self, value):
"""Return the kind (type) of the attribute"""
raise NotImplementedError
def set(self, value):
"""Set the value of the attribute"""
raise NotImplementedError
class ScalarAttr(StateAttr):
"""A scalar attribute for NumberState objects"""
def get(self, copy=False):
"""Return the value of the attribute"""
return getattr(self.owner, self.name)
def get_kind(self, value):
"""Return the kind (type) of the attribute"""
if isinstance(value, float):
return 'f'
elif isinstance(value, int):
return 'i'
else:
raise ValueError("Only integer or floating point values can be stored.")
def set(self, value):
"""Set the value of the attribute"""
setattr(self.owner, self.name, value)
def dump(self, f, name):
"""Write the attribute to a file-like object"""
# print the header line
value = self.get()
kind = self.get_kind(value)
print("% 40s kind=%s value=%s" % (name, kind, value), file=f)
class ArrayAttr(StateAttr):
"""An array attribute for the NumberState object"""
def __init__(self, owner, name):
"""Initialize a ArrayAttr object
Arguments:
``owner`` -- the instance to read the attribute from
``name`` -- the name of the attribute
"""
StateAttr.__init__(self, owner, name)
array = self.get()
if array.dtype.fields is not None:
raise ValueError("Record arrays are not supported yet.")
def get(self, copy=False):
"""Return the value of the attribute"""
array = getattr(self.owner, self.name)
if copy:
return array.copy()
else:
return array
def get_kind(self, value):
"""Return the kind (type) of the attribute"""
return value.dtype.kind
def set(self, value):
"""Set the value of the attribute"""
array = self.get()
array[:] = value
def dump(self, f, name):
"""Write the attribute to a file-like object"""
array = self.get()
# print the header line
print("% 40s kind=%s shape=(%s)" % (
name,
array.dtype.kind,
",".join([str(int(size_axis)) for size_axis in array.shape]),
), file=f)
# print the numbers
counter = 0
for value in array.flat:
counter += 1
print("% 20s" % value, end=' ', file=f)
if counter % 4 == 0:
print(file=f)
if counter % 4 != 0:
print(file=f)
def load(self, f, skip):
"""Load the array data from a file-like object"""
array = self.get()
counter = 0
counter_limit = array.size
convert = array.dtype.type
while counter < counter_limit:
line = f.readline()
words = line.split()
for word in words:
if counter >= counter_limit:
raise FileFormatError("Wrong array data: too many values.")
if not skip:
array.flat[counter] = convert(word)
counter += 1
class NumberState(object):
"""Component class for data structures with human-readable persistence.
The format used to save and load the object is similar to a formatted
checkpoint file from the Gaussian package. Some additional info is
stored such as the shape of an array and the exact data type of the
array elements.
The attributes that contain data to be read from or to be written to
files are set up in the constructor of the owner class. This is a
typical simple example::
>>> class Foo(object):
... def __init__(self, a, b):
... self.a = a
... self.b = b
... self.state = NumberState(self, ["a", "b"])
In this example a is an array and b is a single scalar. One can now
read/write these attributes to a file as follows:
>>> foo = Foo(a, b)
>>> foo.state.dump("somefile.txt")
>>> foo.state.load("somefile.txt")
"""
def __init__(self, owner, names):
"""
Arguments:
| ``owner`` -- the object whose attributes are dumped and loaded
| ``names`` -- a list of attribute names to dump and load
"""
self._owner = owner
self._fields = {}
for name in names:
value = getattr(owner, name)
if isinstance(value, np.ndarray):
self._register(name, ArrayAttr)
elif isinstance(value, int) or isinstance(value, float):
self._register(name, ScalarAttr)
else:
raise TypeError("Can not handle attribute %s=%s" % (name, value))
def _register(self, name, AttrCls):
"""Register a new attribute to take care of with dump and load
Arguments:
| ``name`` -- the name to be used in the dump file
| ``AttrCls`` -- an attr class describing the attribute
"""
if not issubclass(AttrCls, StateAttr):
raise TypeError("The second argument must a StateAttr instance.")
if len(name) > 40:
raise ValueError("Name can count at most 40 characters.")
self._fields[name] = AttrCls(self._owner, name)
def get(self, subset=None):
"""Return a dictionary object with the registered fields and their values
Optional argument:
| ``subset`` -- a list of names to restrict the number of fields
in the result
"""
if subset is None:
return dict((name, attr.get(copy=True)) for name, attr in self._fields.items())
else:
return dict((name, attr.get(copy=True)) for name, attr in self._fields.items() if name in subset)
def set(self, new_fields, subset=None):
"""Assign the registered fields based on a dictionary
Argument:
| ``new_fields`` -- the dictionary with the data to be assigned to
the attributes
Optional argument:
| ``subset`` -- a list of names to restrict the fields that are
effectively overwritten
"""
for name in new_fields:
if name not in self._fields and (subset is None or name in subset):
raise ValueError("new_fields contains an unknown field '%s'." % name)
if subset is not None:
for name in subset:
if name not in self._fields:
raise ValueError("name '%s' in subset is not a known field in self._fields." % name)
if name not in new_fields:
raise ValueError("name '%s' in subset is not a known field in new_fields." % name)
if subset is None:
if len(new_fields) != len(self._fields):
raise ValueError("new_fields contains too many fields.")
for name, attr in self._fields.items():
if subset is None or name in subset:
attr.set(new_fields[name])
def dump(self, filename):
"""Dump the registered fields to a file
Argument:
| ``filename`` -- the file to write to
"""
with open(filename, "w") as f:
for name in sorted(self._fields):
self._fields[name].dump(f, name)
def load(self, filename, subset=None):
"""Load data into the registered fields
Argument:
| ``filename`` -- the filename to read from
Optional argument:
| ``subset`` -- a list of field names that are read from the file.
If not given, all data is read from the file.
"""
with open(filename, "r") as f:
name = None
num_names = 0
while True:
# read a header line
line = f.readline()
if len(line) == 0:
break
# process the header line
words = line.split()
name = words[0]
attr = self._fields.get(name)
if attr is None:
raise FileFormatError("Wrong header: unknown field %s" % name)
if not words[1].startswith("kind="):
raise FileFormatError("Malformatted array header line. (kind)")
kind = words[1][5:]
expected_kind = attr.get_kind(attr.get())
if kind != expected_kind:
raise FileFormatError("Wrong header: kind of field %s does not match. Got %s, expected %s" % (name, kind, expected_kind))
skip = ((subset is not None) and (name not in subset))
if (words[2].startswith("shape=(") and words[2].endswith(")")):
if not isinstance(attr, ArrayAttr):
raise FileFormatError("field '%s' is not an array." % name)
shape = words[2][7:-1]
if shape[-1] == ',':
shape = shape[:-1]
try:
shape = tuple(int(word) for word in shape.split(","))
except ValueError:
raise FileFormatError("Malformatted array header. (shape)")
expected_shape = attr.get().shape
if shape != expected_shape:
raise FileFormatError("Wrong header: shape of field %s does not match. Got %s, expected %s" % (name, shape, expected_shape))
attr.load(f, skip)
elif words[2].startswith("value="):
if not isinstance(attr, ScalarAttr):
raise FileFormatError("field '%s' is not a single value." % name)
if not skip:
if kind == 'i':
attr.set(int(words[2][6:]))
else:
attr.set(float(words[2][6:]))
else:
raise FileFormatError("Malformatted array header line. (shape/value)")
num_names += 1
if num_names != len(self._fields) and subset is None:
raise FileFormatError("Some fields are missing in the file.")
|
molmod/molmod
|
molmod/io/number_state.py
|
Python
|
gpl-3.0
| 12,169
|
[
"Gaussian"
] |
cce0f68489873dceed8b9c10767e0760ebe45cf1baec8cd4d2c8d5fe39507007
|
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2013, Yung-Yu Chen <yyc@solvcon.net>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the SOLVCON nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Material definition.
"""
import numpy as np
from solvcon import gendata
#: Registry singleton.
mltregy = gendata.TypeNameRegistry()
class MaterialMeta(type):
"""
Meta class for material class.
"""
def __new__(cls, name, bases, namespace):
newcls = super(MaterialMeta, cls).__new__(cls, name, bases, namespace)
# register.
mltregy.register(newcls)
return newcls
class Material(metaclass=MaterialMeta):
"""Material properties. The constitutive relation needs not be symmetric.
"""
#: :py:class:`list` of :py:class:`tuple` for indices where the content
#: should be zero.
_zeropoints_ = []
K = np.array([ [
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0],
], [
[0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
], [
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
], ], dtype='float64')
def __init__(self, rho=None, al=None, be=None, ga=None, **kw):
assert None is not rho
assert None is not al
assert None is not be
assert None is not ga
#: Density.
self.rho = rho
#: Alpha angle.
self.al = al
#: Beta angle.
self.be = be
#: Gamma angle.
self.ga = ga
# set stiffness matrix.
origstiff = np.empty((6,6), dtype='float64')
origstiff.fill(0.0)
for key in list(kw.keys()): # copy the keys because we pop from kw while iterating.
if len(key) == 4 and key[:2] == 'co':
try:
i = int(key[2])-1
j = int(key[3])-1
except ValueError:
continue
assert i < origstiff.shape[0]
assert j < origstiff.shape[1]
val = kw.pop(key)
origstiff[i,j] = val
#: Stiffness matrix in the crystal coordinate.
self.origstiff = origstiff
# check for zeros.
self._check_origstiffzero(self.origstiff)
# compute the stiffness matrix in the transformed global coordinate
# system.
bondmat = self.get_bondmat()
#: Stiffness matrix in the transformed global coordinate.
self.stiff = np.dot(bondmat, np.dot(self.origstiff, bondmat.T))
super(Material, self).__init__(**kw)
def __getattr__(self, key):
if len(key) == 4 and key[:2] == 'co':
i = int(key[2])
j = int(key[3])
if 1 <= i <= 6 and 1 <= j <= 6:
return self.origstiff[i-1,j-1]
elif len(key) == 3 and key[0] == 'c':
i = int(key[1])
j = int(key[2])
if 1 <= i <= 6 and 1 <= j <= 6:
return self.stiff[i-1,j-1]
else:
raise AttributeError
def __str__(self):
return '[%s: al=%.2f be=%.2f ga=%.2f (deg)]' % (self.__class__.__name__,
self.al/(np.pi/180), self.be/(np.pi/180), self.ga/(np.pi/180))
@classmethod
def _check_origstiffzero(cls, origstiff):
"""
Check for zero in original stiffness matrix.
@note: no assumed symmetry.
"""
for i, j in cls._zeropoints_:
assert origstiff[i,j] == 0.0
def get_rotmat(self):
"""
Coordinate transformation matrix for three successive rotations through
the Euler angles.
@return: the transformation matrix.
@rtype: numpy.ndarray
"""
al = self.al; be = self.be; ga = self.ga
almat = np.array([
[np.cos(al), np.sin(al), 0],
[-np.sin(al), np.cos(al), 0],
[0, 0, 1],
], dtype='float64')
bemat = np.array([
[1, 0, 0],
[0, np.cos(be), np.sin(be)],
[0, -np.sin(be), np.cos(be)],
], dtype='float64')
gamat = np.array([
[np.cos(ga), np.sin(ga), 0],
[-np.sin(ga), np.cos(ga), 0],
[0, 0, 1],
], dtype='float64')
return np.dot(gamat, np.dot(bemat, almat))
def get_bondmat(self):
"""
The Bond's matrix M as a shorthand of coordinate transformation for the
6-component stress vector.
@return: the Bond's matrix.
@rtype: numpy.ndarray
"""
rotmat = self.get_rotmat()
bond = np.empty((6,6), dtype='float64')
# upper left.
bond[:3,:3] = rotmat[:,:]**2
# upper right.
bond[0,3] = 2*rotmat[0,1]*rotmat[0,2]
bond[0,4] = 2*rotmat[0,2]*rotmat[0,0]
bond[0,5] = 2*rotmat[0,0]*rotmat[0,1]
bond[1,3] = 2*rotmat[1,1]*rotmat[1,2]
bond[1,4] = 2*rotmat[1,2]*rotmat[1,0]
bond[1,5] = 2*rotmat[1,0]*rotmat[1,1]
bond[2,3] = 2*rotmat[2,1]*rotmat[2,2]
bond[2,4] = 2*rotmat[2,2]*rotmat[2,0]
bond[2,5] = 2*rotmat[2,0]*rotmat[2,1]
# lower left.
bond[3,0] = rotmat[1,0]*rotmat[2,0]
bond[3,1] = rotmat[1,1]*rotmat[2,1]
bond[3,2] = rotmat[1,2]*rotmat[2,2]
bond[4,0] = rotmat[2,0]*rotmat[0,0]
bond[4,1] = rotmat[2,1]*rotmat[0,1]
bond[4,2] = rotmat[2,2]*rotmat[0,2]
bond[5,0] = rotmat[0,0]*rotmat[1,0]
bond[5,1] = rotmat[0,1]*rotmat[1,1]
bond[5,2] = rotmat[0,2]*rotmat[1,2]
# lower right.
bond[3,3] = rotmat[1,1]*rotmat[2,2] + rotmat[1,2]*rotmat[2,1]
bond[3,4] = rotmat[1,0]*rotmat[2,2] + rotmat[1,2]*rotmat[2,0]
bond[3,5] = rotmat[1,1]*rotmat[2,0] + rotmat[1,0]*rotmat[2,1]
bond[4,3] = rotmat[0,1]*rotmat[2,2] + rotmat[0,2]*rotmat[2,1]
bond[4,4] = rotmat[0,0]*rotmat[2,2] + rotmat[0,2]*rotmat[2,0]
bond[4,5] = rotmat[0,1]*rotmat[2,0] + rotmat[0,0]*rotmat[2,1]
bond[5,3] = rotmat[0,1]*rotmat[1,2] + rotmat[0,2]*rotmat[1,1]
bond[5,4] = rotmat[0,0]*rotmat[1,2] + rotmat[0,2]*rotmat[1,0]
bond[5,5] = rotmat[0,1]*rotmat[1,0] + rotmat[0,0]*rotmat[1,1]
return bond
def get_jacos(self):
"""
Obtain the Jacobian matrices for the solid.
@return: the Jacobian matrices
@rtype: numpy.ndarray
"""
rho = self.rho
sf = self.stiff
jacos = np.zeros((3,9,9), dtype='float64')
for idm in range(3):
K = self.K[idm]
jaco = jacos[idm]
jaco[:3,3:] = K/(-rho) # the upper right submatrix.
jaco[3:,:3] = -np.dot(sf, K.T) # the lower left submatrix.
return jacos
################################################################################
# Begin material symmetry group.
class Triclinic(Material):
"""
The stiffness matrix has to be symmetric.
"""
_zeropoints_ = []
def __init__(self, *args, **kw):
for key in list(kw.keys()): # copy the keys because we add to kw while iterating.
if len(key) == 4 and key[:2] == 'co':
try:
i = int(key[2])
j = int(key[3])
except ValueError:
continue
symkey = 'co%d%d' % (j, i)
if i != j:
assert symkey not in kw
kw[symkey] = kw[key]
super(Triclinic, self).__init__(*args, **kw)
@classmethod
def _check_origstiffzero(cls, origstiff):
for i, j in cls._zeropoints_:
assert origstiff[i,j] == origstiff[j,i] == 0.0
class Monoclinic(Triclinic):
_zeropoints_ = [
(0,3), (0,5),
(1,3), (1,5),
(2,3), (2,5),
(3,4), (4,5),
]
class Orthorhombic(Triclinic):
_zeropoints_ = [
(0,3), (0,4), (0,5),
(1,3), (1,4), (1,5),
(2,3), (2,4), (2,5),
(3,4), (3,5), (4,5),
]
class Tetragonal(Triclinic):
_zeropoints_ = [
(0,3), (0,4),
(1,3), (1,4),
(2,3), (2,4), (2,5),
(3,4), (3,5), (4,5),
]
def __init__(self, *args, **kw):
kw['co22'] = kw['co11']
kw['co23'] = kw['co13']
kw['co26'] = -kw.get('co16', 0.0)
kw['co55'] = kw['co44']
super(Tetragonal, self).__init__(*args, **kw)
class Trigonal(Triclinic):
_zeropoints_ = [
(0,5), (1,5),
(2,3), (2,4), (2,5),
(3,4),
]
def __init__(self, *args, **kw):
kw['co15'] = -kw.get('co25', 0.0)
kw['co22'] = kw['co11']
kw['co23'] = kw['co13']
kw['co24'] = -kw.get('co14', 0.0)
kw['co46'] = kw.get('co25', 0.0)
kw['co55'] = kw['co44']
kw['co56'] = kw.get('co14', 0.0)
kw['co66'] = (kw['co11'] - kw['co12'])/2
super(Trigonal, self).__init__(*args, **kw)
class Hexagonal(Trigonal):
_zeropoints_ = [
(0,3), (0,4), (0,5),
(1,3), (1,4), (1,5),
(2,3), (2,4), (2,5),
(3,4), (3,5), (4,5),
]
class Cubic(Triclinic):
_zeropoints_ = [
(0,3), (0,4), (0,5),
(1,3), (1,4), (1,5),
(2,3), (2,4), (2,5),
(3,4), (3,5), (4,5),
]
def __init__(self, *args, **kw):
kw['co13'] = kw['co12']
kw['co22'] = kw['co11']
kw['co23'] = kw['co12']
kw['co33'] = kw['co11']
kw['co55'] = kw['co44']
kw['co66'] = kw['co44']
super(Cubic, self).__init__(*args, **kw)
class Isotropic(Triclinic):
_zeropoints_ = [
(0,3), (0,4), (0,5),
(1,3), (1,4), (1,5),
(2,3), (2,4), (2,5),
(3,4), (3,5), (4,5),
]
def __init__(self, *args, **kw):
kw['co12'] = kw['co11']-2*kw['co44']
kw['co13'] = kw['co11']-2*kw['co44']
kw['co22'] = kw['co11']
kw['co23'] = kw['co11']-2*kw['co44']
kw['co33'] = kw['co11']
kw['co55'] = kw['co44']
kw['co66'] = kw['co44']
super(Isotropic, self).__init__(*args, **kw)
# End material symmetry group.
################################################################################
################################################################################
# Begin real material properties.
class GaAs(Cubic):
def __init__(self, *args, **kw):
kw.setdefault('rho', 5307.0)
kw.setdefault('co11', 11.88e10)
kw.setdefault('co12', 5.38e10)
kw.setdefault('co44', 5.94e10)
super(GaAs, self).__init__(*args, **kw)
class ZnO(Hexagonal):
def __init__(self, *args, **kw):
kw.setdefault('rho', 5680.0)
kw.setdefault('co11', 20.97e10)
kw.setdefault('co12', 12.11e10)
kw.setdefault('co13', 10.51e10)
kw.setdefault('co33', 21.09e10)
kw.setdefault('co44', 4.247e10)
super(ZnO, self).__init__(*args, **kw)
class CdS(Hexagonal):
def __init__(self, *args, **kw):
kw.setdefault('rho', 4820.0)
kw.setdefault('co11', 9.07e10)
kw.setdefault('co12', 5.81e10)
kw.setdefault('co13', 5.1e10)
kw.setdefault('co33', 9.38e10)
kw.setdefault('co44', 1.504e10)
super(CdS, self).__init__(*args, **kw)
class Zinc(Hexagonal):
def __init__(self, *args, **kw):
kw.setdefault('rho', 7.1*1.e-3/(1.e-2**3))
kw.setdefault('co11', 14.3e11*1.e-5/(1.e-2**2))
kw.setdefault('co12', 1.7e11*1.e-5/(1.e-2**2))
kw.setdefault('co13', 3.3e11*1.e-5/(1.e-2**2))
kw.setdefault('co33', 5.0e11*1.e-5/(1.e-2**2))
kw.setdefault('co44', 4.0e11*1.e-5/(1.e-2**2))
super(Zinc, self).__init__(*args, **kw)
class Beryl(Hexagonal):
def __init__(self, *args, **kw):
kw.setdefault('rho', 2.7*1.e-3/(1.e-2**3))
kw.setdefault('co11', 26.94e11*1.e-5/(1.e-2**2))
kw.setdefault('co12', 9.61e11*1.e-5/(1.e-2**2))
kw.setdefault('co13', 6.61e11*1.e-5/(1.e-2**2))
kw.setdefault('co33', 23.63e11*1.e-5/(1.e-2**2))
kw.setdefault('co44', 6.53e11*1.e-5/(1.e-2**2))
super(Beryl, self).__init__(*args, **kw)
class Albite(Triclinic):
def __init__(self, *args, **kw):
        # Density for Albite is not set here; pass 'rho' explicitly when instantiating.
kw.setdefault('co11', 69.9e9)
kw.setdefault('co22', 183.5e9)
kw.setdefault('co33', 179.5e9)
kw.setdefault('co44', 24.9e9)
kw.setdefault('co55', 26.8e9)
kw.setdefault('co66', 33.5e9)
kw.setdefault('co12', 34.0e9)
kw.setdefault('co13', 30.8e9)
kw.setdefault('co14', 5.1e9)
kw.setdefault('co15', -2.4e9)
kw.setdefault('co16', -0.9e9)
kw.setdefault('co23', 5.5e9)
kw.setdefault('co24', -3.9e9)
kw.setdefault('co25', -7.7e9)
kw.setdefault('co26', -5.8e9)
kw.setdefault('co34', -8.7e9)
kw.setdefault('co35', 7.1e9)
kw.setdefault('co36', -9.8e9)
kw.setdefault('co45', -2.4e9)
kw.setdefault('co46', -7.2e9)
kw.setdefault('co56', 0.5e9)
super(Albite, self).__init__(*args, **kw)
class Acmite(Monoclinic):
def __init__(self, *args, **kw):
kw.setdefault('rho', 3.5e3)
kw.setdefault('co11', 185.8e9)
kw.setdefault('co22', 181.3e9)
kw.setdefault('co33', 234.4e9)
kw.setdefault('co44', 62.9e9)
kw.setdefault('co55', 51.0e9)
kw.setdefault('co66', 47.4e9)
kw.setdefault('co12', 68.5e9)
kw.setdefault('co13', 70.7e9)
kw.setdefault('co15', 9.8e9)
kw.setdefault('co23', 62.9e9)
kw.setdefault('co25', 9.4e9)
kw.setdefault('co35', 21.4e9)
kw.setdefault('co46', 7.7e9)
super(Acmite, self).__init__(*args, **kw)
class AlphaUranium(Orthorhombic):
def __init__(self, *args, **kw):
        kw.setdefault('rho', 8.2e3) # Placeholder density; not a measured value.
kw.setdefault('co11', 215.e9)
kw.setdefault('co22', 199.e9)
kw.setdefault('co33', 267.e9)
kw.setdefault('co44', 124.e9)
kw.setdefault('co55', 73.e9)
kw.setdefault('co66', 74.e9)
kw.setdefault('co12', 46.e9)
kw.setdefault('co13', 22.e9)
kw.setdefault('co23', 107.e9)
super(AlphaUranium, self).__init__(*args, **kw)
class BariumTitanate(Tetragonal):
def __init__(self, *args, **kw):
kw.setdefault('rho', 6.2e3)
kw.setdefault('co11', 275.0e9)
kw.setdefault('co33', 165.0e9)
kw.setdefault('co44', 54.3e9)
kw.setdefault('co66', 113.0e9)
kw.setdefault('co12', 179.0e9)
kw.setdefault('co13', 151.0e9)
super(BariumTitanate, self).__init__(*args, **kw)
class AlphaQuartz(Trigonal):
def __init__(self, *args, **kw):
kw.setdefault('rho', 2.651e3)
kw.setdefault('co11', 87.6e9)
kw.setdefault('co33', 106.8e9)
kw.setdefault('co44', 57.2e9)
kw.setdefault('co12', 6.1e9)
kw.setdefault('co13', 13.3e9)
kw.setdefault('co14', 17.3e9)
super(AlphaQuartz, self).__init__(*args, **kw)
class RickerSample(Isotropic):
def __init__(self, *args, **kw):
kw.setdefault('rho', 2200.e0)
kw.setdefault('co11', 3200.e0**2*2200.e0)
kw.setdefault('co44', 1847.5e0**2*2200.e0)
super(RickerSample, self).__init__(*args, **kw)
class RickerSampleLight(Isotropic):
def __init__(self, *args, **kw):
scale = 1.e-3
kw.setdefault('rho', 2200.e0*scale)
kw.setdefault('co11', 3200.e0**2*2200.e0*scale)
kw.setdefault('co44', 1847.5e0**2*2200.e0*scale)
super(RickerSampleLight, self).__init__(*args, **kw)
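# Usage sketch (hedged; assumes the base class requires no further positional
# arguments): since the classes above use kw.setdefault(), any literature
# constant can still be overridden per instance, e.g.
#
# >>> gaas = GaAs()               # built-in constants
# >>> stiff = GaAs(co44=6.5e10)   # hypothetical override of one term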
# End real material properties.
################################################################################
# vim: set ff=unix fenc=utf8 ft=python ai et sw=4 ts=4 tw=79:
|
yungyuc/solvcon
|
solvcon/parcel/linear/velstress/material.py
|
Python
|
bsd-3-clause
| 17,175
|
[
"CRYSTAL"
] |
c697857968a97d95f57dbf727c3a2bd879ef7422c3e7a5b30a4ce8fdfac13bb8
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
=================================================================================
Ensemble Similarity Calculations --- :mod:`MDAnalysis.analysis.encore.similarity`
=================================================================================
:Author: Matteo Tiberti, Wouter Boomsma, Tone Bengtsen
.. versionadded:: 0.16.0
The module contains implementations of similarity measures between protein
ensembles described in [Lindorff-Larsen2009]_. The implementation and examples
are described in [Tiberti2015]_.
The module includes facilities for handling ensembles and trajectories through
the :class:`Universe` class, performing clustering or dimensionality reduction
of the ensemble space, estimating multivariate probability distributions from
the input data, and more. ENCORE can be used to compare experimental and
simulation-derived ensembles, as well as estimate the convergence of
trajectories from time-dependent simulations.
ENCORE includes three different methods for calculations of similarity measures
between ensembles implemented in individual functions:
+ **Harmonic Ensemble Similarity** : :func:`hes`
+ **Clustering Ensemble Similarity** : :func:`ces`
+ **Dimensional Reduction Ensemble Similarity** : :func:`dres`
as well as two methods to evaluate the convergence of trajectories:
+ **Clustering based convergence evaluation** : :func:`ces_convergence`
+ **Dimensionality-reduction based convergence evaluation** : :func:`dres_convergence`
When using this module in published work please cite [Tiberti2015]_.
References
==========
.. [Lindorff-Larsen2009] Similarity Measures for Protein Ensembles. Lindorff-Larsen, K. Ferkinghoff-Borg, J. PLoS ONE 2009, 4, e4203.
.. [Tiberti2015] ENCORE: Software for Quantitative Ensemble Comparison. Matteo Tiberti, Elena Papaleo, Tone Bengtsen, Wouter Boomsma, Kresten Lindorff-Larsen. PLoS Comput Biol. 2015, 11.
.. _Examples:
Examples
========
The examples show how to use ENCORE to calculate a similarity measurement
of two simple ensembles. The ensembles are obtained from the MDAnalysis
test suite for two different simulations of the protein AdK. To run the
examples first execute: ::
>>> from MDAnalysis import Universe
>>> import MDAnalysis.analysis.encore as encore
>>> from MDAnalysis.tests.datafiles import PSF, DCD, DCD2
To calculate the Harmonic Ensemble Similarity (:func:`hes`)
two ensemble objects are first created and then used for calculation: ::
>>> ens1 = Universe(PSF, DCD)
>>> ens2 = Universe(PSF, DCD2)
>>> print encore.hes([ens1, ens2])
(array([[ 0. , 38279683.95892926],
[ 38279683.95892926, 0. ]]), None)
Here ``None`` is returned as the second element of the tuple because the
default details parameter is False.
HES can assume any non-negative value, i.e. no upper bound exists and the
measurement can therefore be used as an absolute scale.
The calculation of the Clustering Ensemble Similarity (:func:`ces`)
is computationally more expensive. It is based on clustering algorithms that in
turn require a similarity matrix between the frames the ensembles are made
of. The similarity matrix is derived from a distance matrix (by default an RMSD
matrix; a full RMSD matrix between each pair of frames needs to be computed).
The RMSD matrix is automatically calculated. ::
>>> ens1 = Universe(PSF, DCD)
>>> ens2 = Universe(PSF, DCD2)
>>> CES, details = encore.ces([ens1, ens2])
>>> print CES
[[ 0. 0.68070702]
[ 0.68070702 0. ]]
However, we may want to reuse the RMSD matrix in other calculations e.g.
running CES with different parameters or running DRES. In this
case we first compute the RMSD matrix alone:
>>> rmsd_matrix = encore.get_distance_matrix(
encore.utils.merge_universes([ens1, ens2]),
save_matrix="rmsd.npz")
In the above example the RMSD matrix was also saved in rmsd.npz on disk, and
so can be loaded and re-used at later times, instead of being recomputed:
>>> rmsd_matrix = encore.get_distance_matrix(
encore.utils.merge_universes([ens1, ens2]),
load_matrix="rmsd.npz")
For instance, the rmsd_matrix object can be re-used as input for the
Dimensional Reduction Ensemble Similarity (:func:`dres`) method.
DRES is based on the estimation of the probability density in
a dimensionally-reduced conformational space of the ensembles, obtained from
the original space using either the Stochastic Proximity Embedding algorithm or
the Principal Component Analysis.
As the algorithms require the distance matrix calculated on the original space,
we can reuse the previously-calculated RMSD matrix.
In the following example the dimensions are reduced to 3 using the
saved RMSD matrix and the default SPE dimensional reduction method: ::
>>> DRES,details = encore.dres([ens1, ens2],
distance_matrix = rmsd_matrix)
>>> print DRES
[[ 0. , 0.67453198]
[ 0.67453198, 0. ]]
In addition to the quantitative similarity estimate, the dimensional reduction
can easily be visualized, see the ``Example`` section in
:mod:`MDAnalysis.analysis.encore.dimensionality_reduction.reduce_dimensionality`.
Due to the stochastic nature of SPE, two identical ensembles will not
necessarily result in an exactly 0 estimate of the similarity, but will be very
close. For the same reason, calculating the similarity with the :func:`dres`
twice will not result in necessarily identical values but rather two very close
values.
It should be noted that both in :func:`ces` and :func:`dres` the similarity is
evaluated using the Jensen-Shannon divergence, resulting in an upper bound of
ln(2), which indicates no similarity between the ensembles, and a lower bound
of 0.0, which signifies two identical ensembles. In contrast, the :func:`hes`
function uses a symmetrized version of the Kullback-Leibler divergence, which
is unbounded.
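For reference, the ln(2) upper bound can be evaluated directly; the value
0.69314718 appearing in one of the :func:`dres` examples below is exactly
this bound: ::
    >>> import numpy as np
    >>> np.log(2)
    0.6931471805599453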
Functions for ensemble comparisons
==================================
.. autofunction:: hes
.. autofunction:: ces
.. autofunction:: dres
Function reference
==================
.. All functions are included via automodule :members:.
"""
from __future__ import print_function
from six.moves import range, zip
import MDAnalysis as mda
import numpy as np
import warnings
import logging
try:
from scipy.stats import gaussian_kde
except ImportError:
gaussian_kde = None
msg = "scipy.stats.gaussian_kde could not be imported. " \
"Dimensionality reduction ensemble comparisons will not " \
"be available."
warnings.warn(msg,
category=ImportWarning)
    logging.warning(msg)
del msg
from ...coordinates.memory import MemoryReader
from .confdistmatrix import get_distance_matrix
from .bootstrap import (get_distance_matrix_bootstrap_samples,
get_ensemble_bootstrap_samples)
from .clustering.cluster import cluster
from .clustering.ClusteringMethod import AffinityPropagationNative
from .dimensionality_reduction.DimensionalityReductionMethod import (
StochasticProximityEmbeddingNative)
from .dimensionality_reduction.reduce_dimensionality import (
reduce_dimensionality)
from .covariance import (
covariance_matrix, ml_covariance_estimator, shrinkage_covariance_estimator)
from .utils import merge_universes
from .utils import trm_indices_diag, trm_indices_nodiag
# Low boundary value for log() argument - ensure no nans
EPSILON = 1E-15
xlogy = np.vectorize(
lambda x, y: 0.0 if (x <= EPSILON and y <= EPSILON) else x * np.log(y))
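# xlogy(x, y) evaluates x*np.log(y) elementwise, but returns 0.0 when both x
# and y are numerically zero (<= EPSILON), so that empty clusters contribute
# nothing to the divergence sums below instead of producing nan/-inf.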
def discrete_kullback_leibler_divergence(pA, pB):
"""Kullback-Leibler divergence between discrete probability distribution.
    Notice that this measure is not symmetric, i.e.
    :math:`d_{KL}(p_A,p_B) \\neq d_{KL}(p_B,p_A)`.
Parameters
----------
pA : iterable of floats
First discrete probability density function
pB : iterable of floats
Second discrete probability density function
Returns
-------
dkl : float
        Discrete Kullback-Leibler divergence
"""
return np.sum(xlogy(pA, pA / pB))
# discrete dJS
def discrete_jensen_shannon_divergence(pA, pB):
"""Jensen-Shannon divergence between discrete probability distributions.
Parameters
----------
pA : iterable of floats
First discrete probability density function
pB : iterable of floats
Second discrete probability density function
Returns
-------
djs : float
Discrete Jensen-Shannon divergence
"""
return 0.5 * (discrete_kullback_leibler_divergence(pA, (pA + pB) * 0.5) +
discrete_kullback_leibler_divergence(pB, (pA + pB) * 0.5))
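# A quick illustration (hypothetical values): identical distributions give
# dJS = 0, while disjoint ones reach the ln(2) upper bound:
#
# >>> p = np.array([0.5, 0.5])
# >>> discrete_jensen_shannon_divergence(p, p)
# 0.0
# >>> q = np.array([1.0, 0.0])
# >>> r = np.array([0.0, 1.0])
# >>> discrete_jensen_shannon_divergence(q, r)   # == np.log(2) ~= 0.693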
# calculate harmonic similarity
def harmonic_ensemble_similarity(sigma1,
sigma2,
x1,
x2):
"""
Calculate the harmonic ensemble similarity measure
as defined in [Tiberti2015]_.
Parameters
----------
sigma1 : numpy.array
Covariance matrix for the first ensemble.
sigma2 : numpy.array
Covariance matrix for the second ensemble.
x1: numpy.array
Mean for the estimated normal multivariate distribution of the first
ensemble.
x2: numpy.array
Mean for the estimated normal multivariate distribution of the second
ensemble.
Returns
-------
dhes : float
harmonic similarity measure
"""
# Inverse covariance matrices
sigma1_inv = np.linalg.pinv(sigma1)
sigma2_inv = np.linalg.pinv(sigma2)
# Difference between average vectors
d_avg = x1 - x2
# Distance measure
trace = np.trace(np.dot(sigma1, sigma2_inv) +
np.dot(sigma2, sigma1_inv)
- 2 * np.identity(sigma1.shape[0]))
d_hes = 0.25 * (np.dot(np.transpose(d_avg),
np.dot(sigma1_inv + sigma2_inv,
d_avg)) + trace)
return d_hes
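# A minimal sketch with hypothetical inputs: for identity covariance matrices
# the trace term vanishes and the measure reduces to 0.5*||x1 - x2||^2:
#
# >>> I = np.identity(3)
# >>> harmonic_ensemble_similarity(I, I, np.zeros(3), np.ones(3))
# 1.5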
def clustering_ensemble_similarity(cc, ens1, ens1_id, ens2, ens2_id,
selection="name CA"):
"""Clustering ensemble similarity: calculate the probability densities from
the clusters and calculate discrete Jensen-Shannon divergence.
Parameters
----------
cc : encore.clustering.ClustersCollection
Collection from cluster calculated by a clustering algorithm
(e.g. Affinity propagation)
ens1 : :class:`~MDAnalysis.core.universe.Universe`
First ensemble to be used in comparison
ens1_id : int
First ensemble id as detailed in the ClustersCollection metadata
ens2 : :class:`~MDAnalysis.core.universe.Universe`
Second ensemble to be used in comparison
ens2_id : int
Second ensemble id as detailed in the ClustersCollection metadata
selection : str
Atom selection string in the MDAnalysis format. Default is "name CA".
Returns
-------
djs : float
Jensen-Shannon divergence between the two ensembles, as calculated by
the clustering ensemble similarity method
"""
ens1_coordinates = ens1.trajectory.timeseries(ens1.select_atoms(selection),
format='fac')
ens2_coordinates = ens2.trajectory.timeseries(ens2.select_atoms(selection),
format='fac')
tmpA = np.array([np.where(c.metadata['ensemble_membership'] == ens1_id)[
0].shape[0] / float(ens1_coordinates.shape[0]) for
c in cc])
tmpB = np.array([np.where(c.metadata['ensemble_membership'] == ens2_id)[
0].shape[0] / float(ens2_coordinates.shape[0]) for
c in cc])
# Exclude clusters which have 0 elements in both ensembles
pA = tmpA[tmpA + tmpB > EPSILON]
pB = tmpB[tmpA + tmpB > EPSILON]
return discrete_jensen_shannon_divergence(pA, pB)
def cumulative_clustering_ensemble_similarity(cc, ens1_id, ens2_id,
ens1_id_min=1, ens2_id_min=1):
"""
Calculate clustering ensemble similarity between joined ensembles.
This means that, after clustering has been performed, some ensembles are
merged and the dJS is calculated between the probability distributions of
the two clusters groups. In particular, the two ensemble groups are defined
by their ensembles id: one of the two joined ensembles will comprise all
the ensembles with id [ens1_id_min, ens1_id], and the other ensembles will
comprise all the ensembles with id [ens2_id_min, ens2_id].
Parameters
----------
cc : encore.ClustersCollection
Collection from cluster calculated by a clustering algorithm
(e.g. Affinity propagation)
ens1_id : int
First ensemble id as detailed in the ClustersCollection
metadata
ens2_id : int
Second ensemble id as detailed in the ClustersCollection
metadata
Returns
-------
djs : float
Jensen-Shannon divergence between the two ensembles, as
calculated by the clustering ensemble similarity method
"""
    # Note the parenthesisation: the lower bound belongs inside logical_and.
    ensA = [np.where(np.logical_and(
        c.metadata['ensemble_membership'] <= ens1_id,
        c.metadata['ensemble_membership'] >= ens1_id_min))[0].shape[0]
        for c in cc]
    ensB = [np.where(np.logical_and(
        c.metadata['ensemble_membership'] <= ens2_id,
        c.metadata['ensemble_membership'] >= ens2_id_min))[0].shape[0]
        for c in cc]
sizeA = float(np.sum(ensA))
sizeB = float(np.sum(ensB))
tmpA = np.array(ensA) / sizeA
tmpB = np.array(ensB) / sizeB
# Exclude clusters which have 0 elements in both ensembles
pA = tmpA[tmpA + tmpB > EPSILON]
pB = tmpB[tmpA + tmpB > EPSILON]
return discrete_jensen_shannon_divergence(pA, pB)
def gen_kde_pdfs(embedded_space, ensemble_assignment, nensembles,
nsamples):
"""
Generate Kernel Density Estimates (KDE) from embedded spaces and
elaborate the coordinates for later use.
Parameters
----------
embedded_space : numpy.array
Array containing the coordinates of the embedded space
ensemble_assignment : numpy.array
        Array containing one int per ensemble conformation. These make it
        possible to distinguish, in the complete embedded space, which
        conformations belong to each ensemble. For instance, if
        ensemble_assignment is [1,1,1,1,2,2], the first four conformations
        belong to ensemble 1 and the last two to ensemble 2.
nensembles : int
Number of ensembles
nsamples : int
samples to be drawn from the ensembles. Will be required in
a later stage in order to calculate dJS.
Returns
-------
kdes : scipy.stats.gaussian_kde
KDEs calculated from ensembles
resamples : list of numpy.array
For each KDE, draw samples according to the probability distribution
of the KDE mixture model
embedded_ensembles : list of numpy.array
List of numpy.array containing, each one, the elements of the
embedded space belonging to a certain ensemble
"""
kdes = []
embedded_ensembles = []
resamples = []
if gaussian_kde is None:
# hack: if we are running with minimal dependencies then scipy was
# not imported and we have to bail here (see scipy import at top)
raise ImportError("For Kernel Density Estimation functionality you"
"need to import scipy")
for i in range(1, nensembles + 1):
this_embedded = embedded_space.transpose()[
np.where(np.array(ensemble_assignment) == i)].transpose()
embedded_ensembles.append(this_embedded)
kdes.append(gaussian_kde(
this_embedded))
# # Set number of samples
# if not nsamples:
# nsamples = this_embedded.shape[1] * 10
# Resample according to probability distributions
for this_kde in kdes:
resamples.append(this_kde.resample(nsamples))
return (kdes, resamples, embedded_ensembles)
def dimred_ensemble_similarity(kde1, resamples1, kde2, resamples2,
ln_P1_exp_P1=None, ln_P2_exp_P2=None,
ln_P1P2_exp_P1=None, ln_P1P2_exp_P2=None):
"""
    Calculate the Jensen-Shannon divergence according to the
    dimensionality reduction method. In this case, we have continuous
    probability densities, thus we need to integrate over the measurable
    space. The aim is to first calculate the Kullback-Leibler divergence, which
is defined as:
.. math::
D_{KL}(P(x) || Q(x)) = \\int_{-\\infty}^{\\infty}P(x_i) ln(P(x_i)/Q(x_i)) = \\langle{}ln(P(x))\\rangle{}_P - \\langle{}ln(Q(x))\\rangle{}_P
where the :math:`\\langle{}.\\rangle{}_P` denotes an expectation calculated
under the distribution P. We can, thus, just estimate the expectation
values of the components to get an estimate of dKL.
Since the Jensen-Shannon distance is actually more complex, we need to
estimate four expectation values:
.. math::
\\langle{}log(P(x))\\rangle{}_P
\\langle{}log(Q(x))\\rangle{}_Q
\\langle{}log(0.5*(P(x)+Q(x)))\\rangle{}_P
\\langle{}log(0.5*(P(x)+Q(x)))\\rangle{}_Q
Parameters
----------
kde1 : scipy.stats.gaussian_kde
Kernel density estimation for ensemble 1
resamples1 : numpy.array
        Samples drawn according to kde1. Will be used as samples to
        calculate the expected values according to 'P' as detailed before.
kde2 : scipy.stats.gaussian_kde
Kernel density estimation for ensemble 2
resamples2 : numpy.array
        Samples drawn according to kde2. Will be used as samples to
        calculate the expected values according to 'Q' as detailed before.
ln_P1_exp_P1 : float or None
Use this value for :math:`\\langle{}log(P(x))\\rangle{}_P`; if None,
calculate it instead
ln_P2_exp_P2 : float or None
Use this value for :math:`\\langle{}log(Q(x))\\rangle{}_Q`; if
None, calculate it instead
ln_P1P2_exp_P1 : float or None
Use this value for
:math:`\\langle{}log(0.5*(P(x)+Q(x)))\\rangle{}_P`;
if None, calculate it instead
ln_P1P2_exp_P2 : float or None
Use this value for
:math:`\\langle{}log(0.5*(P(x)+Q(x)))\\rangle{}_Q`;
if None, calculate it instead
Returns
-------
djs : float
Jensen-Shannon divergence calculated according to the dimensionality
reduction method
"""
    # Test against None explicitly: a legitimate pre-computed value of 0.0
    # must not trigger recomputation.
    if ln_P1_exp_P1 is None and ln_P2_exp_P2 is None and \
            ln_P1P2_exp_P1 is None and ln_P1P2_exp_P2 is None:
ln_P1_exp_P1 = np.average(np.log(kde1.evaluate(resamples1)))
ln_P2_exp_P2 = np.average(np.log(kde2.evaluate(resamples2)))
ln_P1P2_exp_P1 = np.average(np.log(
0.5 * (kde1.evaluate(resamples1) + kde2.evaluate(resamples1))))
ln_P1P2_exp_P2 = np.average(np.log(
0.5 * (kde1.evaluate(resamples2) + kde2.evaluate(resamples2))))
return 0.5 * (
ln_P1_exp_P1 - ln_P1P2_exp_P1 + ln_P2_exp_P2 - ln_P1P2_exp_P2)
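# Sanity-check sketch (assumes scipy is available): comparing a KDE with
# itself yields exactly 0.0, since P == 0.5*(P + P):
#
# >>> pts = np.random.RandomState(0).randn(1, 200)
# >>> kde = gaussian_kde(pts)
# >>> samples = kde.resample(100)
# >>> dimred_ensemble_similarity(kde, samples, kde, samples)
# 0.0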
def cumulative_gen_kde_pdfs(embedded_space, ensemble_assignment, nensembles,
nsamples, ens_id_min=1, ens_id_max=None):
"""
Generate Kernel Density Estimates (KDE) from embedded spaces and
    elaborate the coordinates for later use. Unlike gen_kde_pdfs, more than
    one ensemble is considered as the space on which each KDE is generated;
    in particular, ensembles with IDs in [ens_id_min, ens_id_max] are used.
Parameters
----------
embedded_space : numpy.array
Array containing the coordinates of the embedded space
ensemble_assignment : numpy.array
        Array containing one int per ensemble conformation. These make it
        possible to distinguish, in the complete embedded space, which
        conformations belong to each ensemble. For instance, if
        ensemble_assignment is [1,1,1,1,2,2], the first four conformations
        belong to ensemble 1 and the last two to ensemble 2.
nensembles : int
Number of ensembles
nsamples : int
Samples to be drawn from the ensembles. Will be required in a later
stage in order to calculate dJS.
ens_id_min : int
Minimum ID of the ensemble to be considered; see description
ens_id_max : int
Maximum ID of the ensemble to be considered; see description. If None,
it will be set to the maximum possible value given the number of
ensembles.
Returns
-------
kdes : scipy.stats.gaussian_kde
KDEs calculated from ensembles
resamples : list of numpy.array
For each KDE, draw samples according to the probability
distribution of the kde mixture model
embedded_ensembles : list of numpy.array
List of numpy.array containing, each one, the elements of the
embedded space belonging to a certain ensemble
"""
if gaussian_kde is None:
# hack: if we are running with minimal dependencies then scipy was
# not imported and we have to bail here (see scipy import at top)
raise ImportError("For Kernel Density Estimation functionality you"
"need to import scipy")
kdes = []
embedded_ensembles = []
resamples = []
if not ens_id_max:
ens_id_max = nensembles + 1
for i in range(ens_id_min, ens_id_max):
this_embedded = embedded_space.transpose()[np.where(
np.logical_and(ensemble_assignment >= ens_id_min,
ensemble_assignment <= i))].transpose()
embedded_ensembles.append(this_embedded)
kdes.append(
gaussian_kde(this_embedded))
# Resample according to probability distributions
for this_kde in kdes:
resamples.append(this_kde.resample(nsamples))
return (kdes, resamples, embedded_ensembles)
def write_output(matrix, base_fname=None, header="", suffix="",
extension="dat"):
"""
Write output matrix with a nice format, to stdout and optionally a file.
Parameters
----------
matrix : encore.utils.TriangularMatrix
Matrix containing the values to be printed
base_fname : str
Basic filename for output. If None, no files will be written, and
the matrix will be just printed on standard output
header : str
Text to be written just before the matrix
suffix : str
String to be concatenated to basename, in order to get the final
file name
extension : str
Extension for the output file
"""
if base_fname is not None:
fname = base_fname + "-" + suffix + "." + extension
else:
fname = None
matrix.square_print(header=header, fname=fname)
def prepare_ensembles_for_convergence_increasing_window(ensemble,
window_size,
selection="name CA"):
"""
Generate ensembles to be fed to ces_convergence or dres_convergence
from a single ensemble. Basically, the different slices the algorithm
needs are generated here.
Parameters
----------
ensemble : :class:`~MDAnalysis.core.universe.Universe` object
Input ensemble
window_size : int
size of the window (in number of frames) to be used
selection : str
Atom selection string in the MDAnalysis format. Default is "name CA"
Returns
-------
tmp_ensembles :
The original ensemble is divided into different ensembles, each being
a window_size-long slice of the original ensemble. The last
ensemble will be bigger if the length of the input ensemble
is not exactly divisible by window_size.
"""
ens_size = ensemble.trajectory.timeseries(ensemble.select_atoms(selection),
format='fac').shape[0]
    # Integer division: rest_slices is used as a range() bound below.
    rest_slices = ens_size // window_size
residuals = ens_size % window_size
slices_n = [0]
tmp_ensembles = []
for rs in range(rest_slices - 1):
slices_n.append(slices_n[-1] + window_size)
slices_n.append(slices_n[-1] + residuals + window_size)
for s,sl in enumerate(slices_n[:-1]):
tmp_ensembles.append(mda.Universe(
ensemble.filename,
ensemble.trajectory.timeseries(format='fac')
[slices_n[s]:slices_n[s + 1], :, :],
format=MemoryReader))
return tmp_ensembles
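# Slicing sketch (hypothetical numbers): for a 25-frame trajectory and
# window_size=10, rest_slices is 2 and slices_n becomes [0, 10, 25], i.e.
# two windows of 10 and 15 frames -- the remainder folds into the last one.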
def hes(ensembles,
selection="name CA",
cov_estimator="shrinkage",
weights='mass',
align=False,
details=False,
estimate_error=False,
bootstrapping_samples=100,
calc_diagonal=False):
"""
Calculates the Harmonic Ensemble Similarity (HES) between ensembles using
the symmetrized version of Kullback-Leibler divergence as described
in [Tiberti2015]_.
Parameters
----------
ensembles : list
List of Universe objects for similarity measurements.
selection : str, optional
Atom selection string in the MDAnalysis format. Default is "name CA"
cov_estimator : str, optional
        Covariance matrix estimator method, either shrinkage (`shrinkage`)
        or maximum likelihood (`ml`). Default is shrinkage.
weights : str/array_like, optional
        Specify optional weights. If ``mass``, the masses of the ensemble
        atoms are used.
align : bool, optional
Whether to align the ensembles before calculating their similarity.
Note: this changes the ensembles in-place, and will thus leave your
ensembles in an altered state.
(default is False)
details : bool, optional
Save the mean and covariance matrix for each
ensemble in a numpy array (default is False).
estimate_error : bool, optional
Whether to perform error estimation (default is False).
bootstrapping_samples : int, optional
Number of times the similarity matrix will be bootstrapped (default
is 100), only if estimate_error is True.
calc_diagonal : bool, optional
Whether to calculate the diagonal of the similarity scores
(i.e. the similarities of every ensemble against itself).
If this is False (default), 0.0 will be used instead.
Returns
-------
numpy.array (bidimensional)
Harmonic similarity measurements between each pair of ensembles.
Notes
-----
The method assumes that each ensemble is derived from a multivariate normal
    distribution. The mean and covariance matrix are thus estimated from
    the distribution of each ensemble and used for comparison by the
    symmetrized version of the Kullback-Leibler divergence defined as:
.. math::
D_{KL}(P(x) || Q(x)) = \\int_{-\\infty}^{\\infty}P(x_i)
ln(P(x_i)/Q(x_i)) = \\langle{}ln(P(x))\\rangle{}_P -
\\langle{}ln(Q(x))\\rangle{}_P
where the :math:`\\langle{}.\\rangle{}_P` denotes an expectation
calculated under the distribution P.
For each ensemble, the mean conformation is estimated as the average over
the ensemble, and the covariance matrix is calculated by default using a
shrinkage estimation method (or by a maximum-likelihood method,
optionally).
    Note that the symmetrized version of the Kullback-Leibler divergence has no
    upper bound (unlike the Jensen-Shannon divergence used by, for instance,
    CES and DRES).
When using this similarity measure, consider whether you want to align
the ensembles first (see example below).
Example
-------
To calculate the Harmonic Ensemble similarity, two ensembles are created
as Universe objects from a topology file and two trajectories. The
topology- and trajectory files used are obtained from the MDAnalysis
test suite for two different simulations of the protein AdK. To run the
examples see the module `Examples`_ for how to import the files: ::
>>> ens1 = Universe(PSF, DCD)
>>> ens2 = Universe(PSF, DCD2)
>>> HES, details = encore.hes([ens1, ens2])
>>> print HES
[[ 0. 38279683.95892926]
[ 38279683.95892926 0. ]]
You can use the align=True option to align the ensembles first. This will
align everything to the current timestep in the first ensemble. Note that
this changes the ens1 and ens2 objects:
>>> print encore.hes([ens1, ens2], align=True)[0]
[[ 0. 6880.34140106]
[ 6880.34140106 0. ]]
Alternatively, for greater flexibility in how the alignment should be done
    you can use an AlignTraj object directly:
>>> from MDAnalysis.analysis import align
>>> align.AlignTraj(ens1, ens1, select="name CA", in_memory=True).run()
>>> align.AlignTraj(ens2, ens1, select="name CA", in_memory=True).run()
>>> print encore.hes([ens1, ens2])[0]
[[ 0. 7032.19607004]
[ 7032.19607004 0. ]]
"""
if not isinstance(weights, (list, tuple, np.ndarray)) and weights == 'mass':
weights = ['mass' for _ in range(len(ensembles))]
elif weights is not None:
if len(weights) != len(ensembles):
raise ValueError("need weights for every ensemble")
else:
weights = [None for _ in range(len(ensembles))]
# Ensure in-memory trajectories either by calling align
# with in_memory=True or by directly calling transfer_to_memory
# on the universe.
    if align:
        # Import the align submodule explicitly; mda.analysis.align is not
        # guaranteed to be importable as an attribute unless the submodule
        # has already been loaded.
        from MDAnalysis.analysis import align as align_module
        for e, w in zip(ensembles, weights):
            align_module.AlignTraj(e, ensembles[0],
                                   select=selection,
                                   weights=w,
                                   in_memory=True).run()
else:
for ensemble in ensembles:
ensemble.transfer_to_memory()
if calc_diagonal:
pairs_indices = list(trm_indices_diag(len(ensembles)))
else:
pairs_indices = list(trm_indices_nodiag(len(ensembles)))
logging.info("Chosen metric: Harmonic similarity")
if cov_estimator == "shrinkage":
covariance_estimator = shrinkage_covariance_estimator
logging.info(" Covariance matrix estimator: Shrinkage")
elif cov_estimator == "ml":
covariance_estimator = ml_covariance_estimator
logging.info(" Covariance matrix estimator: Maximum Likelihood")
else:
logging.error(
"Covariance estimator {0} is not supported. "
"Choose between 'shrinkage' and 'ml'.".format(cov_estimator))
return None
out_matrix_eln = len(ensembles)
xs = []
sigmas = []
if estimate_error:
data = []
ensembles_list = []
for i, ensemble in enumerate(ensembles):
ensembles_list.append(
get_ensemble_bootstrap_samples(
ensemble,
samples=bootstrapping_samples))
for t in range(bootstrapping_samples):
logging.info("The coordinates will be bootstrapped.")
xs = []
sigmas = []
values = np.zeros((out_matrix_eln, out_matrix_eln))
for i, e_orig in enumerate(ensembles):
xs.append(np.average(
ensembles_list[i][t].trajectory.timeseries(
e_orig.select_atoms(selection),
format=('fac')),
axis=0).flatten())
sigmas.append(covariance_matrix(ensembles_list[i][t],
weights=weights[i],
estimator=covariance_estimator,
selection=selection))
for pair in pairs_indices:
value = harmonic_ensemble_similarity(x1=xs[pair[0]],
x2=xs[pair[1]],
sigma1=sigmas[pair[0]],
sigma2=sigmas[pair[1]])
values[pair[0], pair[1]] = value
values[pair[1], pair[0]] = value
data.append(values)
avgs = np.average(data, axis=0)
stds = np.std(data, axis=0)
return (avgs, stds)
# Calculate the parameters for the multivariate normal distribution
# of each ensemble
values = np.zeros((out_matrix_eln, out_matrix_eln))
for e, w in zip(ensembles, weights):
# Extract coordinates from each ensemble
coordinates_system = e.trajectory.timeseries(e.select_atoms(selection),
format='fac')
# Average coordinates in each system
xs.append(np.average(coordinates_system, axis=0).flatten())
# Covariance matrices in each system
sigmas.append(covariance_matrix(e,
weights=w,
estimator=covariance_estimator,
selection=selection))
for i, j in pairs_indices:
value = harmonic_ensemble_similarity(x1=xs[i],
x2=xs[j],
sigma1=sigmas[i],
sigma2=sigmas[j])
values[i, j] = value
values[j, i] = value
# Save details as required
if details:
kwds = {}
for i in range(out_matrix_eln):
kwds['ensemble{0:d}_mean'.format(i + 1)] = xs[i]
kwds['ensemble{0:d}_covariance_matrix'.format(i + 1)] = sigmas[i]
details = np.array(kwds)
else:
details = None
return values, details
def ces(ensembles,
selection="name CA",
clustering_method=AffinityPropagationNative(
preference=-1.0,
max_iter=500,
convergence_iter=50,
damping=0.9,
add_noise=True),
distance_matrix=None,
estimate_error=False,
bootstrapping_samples=10,
ncores=1,
calc_diagonal=False,
allow_collapsed_result=True):
"""
Calculates the Clustering Ensemble Similarity (CES) between ensembles
using the Jensen-Shannon divergence as described in
[Tiberti2015]_.
Parameters
----------
ensembles : list
List of ensemble objects for similarity measurements
selection : str, optional
Atom selection string in the MDAnalysis format. Default is "name CA"
clustering_method :
A single or a list of instances of the
:class:`MDAnalysis.analysis.encore.clustering.ClusteringMethod` classes
from the clustering module. Different parameters for the same clustering
method can be explored by adding different instances of the same
clustering class. Clustering methods options are the
Affinity Propagation (default), the DBSCAN and the KMeans. The latter
two methods need the sklearn python module installed.
distance_matrix : encore.utils.TriangularMatrix
Distance matrix clustering methods. If this parameter
is not supplied the matrix will be calculated on the fly.
estimate_error : bool, optional
Whether to perform error estimation (default is False).
Only bootstrapping mode is supported.
bootstrapping_samples : int, optional
number of samples to be used for estimating error.
ncores : int, optional
Maximum number of cores to be used (default is 1).
calc_diagonal : bool, optional
Whether to calculate the diagonal of the similarity scores
(i.e. the similarities of every ensemble against itself).
If this is False (default), 0.0 will be used instead.
allow_collapsed_result: bool, optional
Whether a return value of a list of one value should be collapsed
into just the value.
Returns
-------
ces, details : numpy.array, numpy.array
ces contains the similarity values, arranged in a numpy.array.
If only one clustering_method is provided the output will be a
2-dimensional square symmetrical numpy.array. The order of the matrix
elements depends on the order of the input ensembles: for instance, if
ensemble = [ens1, ens2, ens3]
the matrix elements [0,2] and [2,0] will both contain the similarity
value between ensembles ens1 and ens3.
Elaborating on the previous example, if *n* ensembles are given and *m*
clustering_methods are provided the output will be a list of *m* arrays
ordered by the input sequence of methods, each with a *n*x*n*
symmetrical similarity matrix.
details contains information on the clustering: the individual size of
each cluster, the centroids and the frames associated with each cluster.
Notes
-----
In the Jensen-Shannon divergence the upper bound of ln(2) signifies
no similarity between the two ensembles, the lower bound, 0.0,
signifies identical ensembles.
To calculate the CES, the affinity propagation method (or others, if
specified) is used to partition the whole space of conformations. The
population of each ensemble in each cluster is then taken as a probability
density function. Different probability density functions from each
ensemble are finally compared using the Jensen-Shannon divergence measure.
Examples
--------
To calculate the Clustering Ensemble similarity, two ensembles are
created as Universe object using a topology file and two trajectories. The
topology- and trajectory files used are obtained from the MDAnalysis
test suite for two different simulations of the protein AdK. To run the
examples see the module `Examples`_ for how to import the files.
Here the simplest case of just two instances of :class:`Universe` is illustrated: ::
>>> ens1 = Universe(PSF, DCD)
>>> ens2 = Universe(PSF, DCD2)
>>> CES,details = encore.ces([ens1,ens2])
>>> print CES
[[ 0. 0.68070702]
[ 0.68070702 0. ]]
To use a different clustering method, set the parameter clustering_method
(Note that the sklearn module must be installed). Likewise, different parameters
for the same clustering method can be explored by adding different
instances of the same clustering class: ::
>>> CES, details = encore.ces([ens1,ens2],
clustering_method = [encore.DBSCAN(eps=0.45),
encore.DBSCAN(eps=0.50)])
>>> print "eps=0.45: ", CES[0]
eps=0.45: [[ 0. 0.20447236]
[ 0.20447236 0. ]]
>>> print "eps=0.5: ", CES[1]
eps=0.5: [[ 0. 0.25331629]
     [ 0.25331629 0. ]]
"""
for ensemble in ensembles:
ensemble.transfer_to_memory()
if calc_diagonal:
pairs_indices = list(trm_indices_diag(len(ensembles)))
else:
pairs_indices = list(trm_indices_nodiag(len(ensembles)))
clustering_methods = clustering_method
if not hasattr(clustering_method, '__iter__'):
clustering_methods = [clustering_method]
any_method_accept_distance_matrix = \
np.any([method.accepts_distance_matrix for method in clustering_methods])
all_methods_accept_distance_matrix = \
np.all([method.accepts_distance_matrix for method in clustering_methods])
# Register which ensembles the samples belong to
ensemble_assignment = []
for i, ensemble in enumerate(ensembles):
ensemble_assignment += [i+1]*len(ensemble.trajectory)
# Calculate distance matrix if not provided
    if any_method_accept_distance_matrix and distance_matrix is None:
distance_matrix = get_distance_matrix(merge_universes(ensembles),
selection=selection,
ncores=ncores)
if estimate_error:
if any_method_accept_distance_matrix:
distance_matrix = \
get_distance_matrix_bootstrap_samples(
distance_matrix,
ensemble_assignment,
samples=bootstrapping_samples,
ncores=ncores)
if not all_methods_accept_distance_matrix:
ensembles_list = []
for i, ensemble in enumerate(ensembles):
ensembles_list.append(
get_ensemble_bootstrap_samples(
ensemble,
samples=bootstrapping_samples))
ensembles = []
for j in range(bootstrapping_samples):
ensembles.append([])
for i, e in enumerate(ensembles_list):
ensembles[-1].append(e[j])
else:
# if all methods accept distances matrices, duplicate
# ensemble so that it matches size of distance matrices
# (no need to resample them since they will not be used)
ensembles = [ensembles]*bootstrapping_samples
# Call clustering procedure
    ccs = cluster(ensembles,
                  method=clustering_methods,
                  selection=selection,
                  distance_matrix=distance_matrix,
                  ncores=ncores,
                  allow_collapsed_result=False)
# Do error analysis
if estimate_error:
k = 0
values = {}
avgs = []
stds = []
for i, p in enumerate(clustering_methods):
failed_runs = 0
values[i] = []
for j in range(bootstrapping_samples):
if ccs[k].clusters is None:
failed_runs += 1
k += 1
continue
values[i].append(np.zeros((len(ensembles[j]),
len(ensembles[j]))))
for pair in pairs_indices:
# Calculate dJS
this_djs = \
clustering_ensemble_similarity(ccs[k],
ensembles[j][
pair[0]],
pair[0] + 1,
ensembles[j][
pair[1]],
pair[1] + 1,
selection=selection)
values[i][-1][pair[0], pair[1]] = this_djs
values[i][-1][pair[1], pair[0]] = this_djs
k += 1
outs = np.array(values[i])
avgs.append(np.average(outs, axis=0))
stds.append(np.std(outs, axis=0))
        if not hasattr(clustering_method, '__iter__'):
            avgs = avgs[0]
            stds = stds[0]
return avgs, stds
values = []
details = {}
for i, p in enumerate(clustering_methods):
if ccs[i].clusters is None:
continue
else:
values.append(np.zeros((len(ensembles), len(ensembles))))
for pair in pairs_indices:
# Calculate dJS
this_val = \
clustering_ensemble_similarity(ccs[i],
ensembles[pair[0]],
pair[0] + 1,
ensembles[pair[1]],
pair[1] + 1,
selection=selection)
values[-1][pair[0], pair[1]] = this_val
values[-1][pair[1], pair[0]] = this_val
details['clustering'] = ccs
if allow_collapsed_result and not hasattr(clustering_method, '__iter__'):
values = values[0]
return values, details
def dres(ensembles,
selection="name CA",
dimensionality_reduction_method = StochasticProximityEmbeddingNative(
dimension=3,
distance_cutoff = 1.5,
min_lam=0.1,
max_lam=2.0,
ncycle=100,
nstep=10000),
distance_matrix=None,
nsamples=1000,
estimate_error=False,
bootstrapping_samples=100,
ncores=1,
calc_diagonal=False,
allow_collapsed_result=True):
"""
Calculates the Dimensional Reduction Ensemble Similarity (DRES) between
ensembles using the Jensen-Shannon divergence as described in
[Tiberti2015]_.
Parameters
----------
ensembles : list
List of ensemble objects for similarity measurements
selection : str, optional
Atom selection string in the MDAnalysis format. Default is "name CA"
dimensionality_reduction_method :
A single or a list of instances of the DimensionalityReductionMethod
classes from the dimensionality_reduction module. Different parameters
for the same method can be explored by adding different instances of
the same dimensionality reduction class. Provided methods are the
Stochastic Proximity Embedding (default) and the Principal Component
Analysis.
distance_matrix : encore.utils.TriangularMatrix
        Conformational distance matrix. It will be calculated on the fly
        from the ensemble data if it is not provided.
nsamples : int, optional
Number of samples to be drawn from the ensembles (default is 1000).
This is used to resample the density estimates and calculate the
Jensen-Shannon divergence between ensembles.
estimate_error : bool, optional
Whether to perform error estimation (default is False)
bootstrapping_samples : int, optional
number of samples to be used for estimating error.
ncores : int, optional
Maximum number of cores to be used (default is 1).
calc_diagonal : bool, optional
Whether to calculate the diagonal of the similarity scores
        (i.e. the similarities of every ensemble against itself).
If this is False (default), 0.0 will be used instead.
allow_collapsed_result: bool, optional
Whether a return value of a list of one value should be collapsed
into just the value.
Returns
-------
dres, details : numpy.array, numpy.array
        dres contains the similarity values, arranged in a numpy.array.
        If a single dimensionality reduction method is provided, the output
        will be a 2-dimensional square symmetrical numpy.array.
The order of the matrix elements depends on the order of the
input ensemble: for instance, if
ensemble = [ens1, ens2, ens3]
then the matrix elements [0,2] and [2,0] will both contain the
similarity value between ensembles ens1 and ens3.
Elaborating on the previous example, if *n* ensembles are given and *m*
methods are provided the output will be a list of *m* arrays
ordered by the input sequence of methods, each with a *n*x*n*
symmetrical similarity matrix.
details provide an array of the reduced_coordinates.
Notes
-----
To calculate the similarity, the method first projects the ensembles into
lower dimensions by using the Stochastic Proximity Embedding (or others)
algorithm. A gaussian kernel-based density estimation method is then used
to estimate the probability density for each ensemble which is then used
to compute the Jensen-Shannon divergence between each pair of ensembles.
In the Jensen-Shannon divergence the upper bound of ln(2) signifies
no similarity between the two ensembles, the lower bound, 0.0,
signifies identical ensembles. However, due to the stochastic nature of
the dimensional reduction in :func:`dres`, two identical ensembles will
not necessarily result in an exact 0.0 estimate of the similarity but
will be very close. For the same reason, calculating the similarity with
the :func:`dres` twice will not result in two identical numbers; small
differences have to be expected.
Examples
--------
To calculate the Dimensional Reduction Ensemble similarity, two ensembles
are created as Universe objects from a topology file and two trajectories.
The topology- and trajectory files used are obtained from the MDAnalysis
test suite for two different simulations of the protein AdK. To run the
examples see the module `Examples`_ for how to import the files.
Here the simplest case of comparing just two instances of :class:`Universe` is
illustrated: ::
>>> ens1 = Universe(PSF,DCD)
>>> ens2 = Universe(PSF,DCD2)
>>> DRES, details = encore.dres([ens1,ens2])
>>> print DRES
[[ 0. 0.67996043]
[ 0.67996043 0. ]]
In addition to the quantitative similarity estimate, the dimensional
reduction can easily be visualized, see the ``Example`` section in
    :mod:`MDAnalysis.analysis.encore.dimensionality_reduction.reduce_dimensionality`.
    To use a different dimensionality reduction method, simply set the
    parameter dimensionality_reduction_method. Likewise, different parameters
    for the same method can be explored by adding different
    instances of the same method class: ::
>>> DRES, details = encore.dres([ens1,ens2],
dimensionality_reduction_method = encore.PrincipalComponentAnalysis(dimension=2))
>>> print DRES
[[ 0. 0.69314718]
[ 0.69314718 0. ]]
"""
for ensemble in ensembles:
ensemble.transfer_to_memory()
if calc_diagonal:
pairs_indices = list(trm_indices_diag(len(ensembles)))
else:
pairs_indices = list(trm_indices_nodiag(len(ensembles)))
dimensionality_reduction_methods = dimensionality_reduction_method
if not hasattr(dimensionality_reduction_method, '__iter__'):
dimensionality_reduction_methods = [dimensionality_reduction_method]
any_method_accept_distance_matrix = \
np.any([method.accepts_distance_matrix for method in dimensionality_reduction_methods])
all_methods_accept_distance_matrix = \
np.all([method.accepts_distance_matrix for method in dimensionality_reduction_methods])
# Register which ensembles the samples belong to
ensemble_assignment = []
for i, ensemble in enumerate(ensembles):
ensemble_assignment += [i+1]*len(ensemble.trajectory)
# Calculate distance matrix if not provided
    if any_method_accept_distance_matrix and distance_matrix is None:
distance_matrix = get_distance_matrix(merge_universes(ensembles),
selection=selection,
ncores=ncores)
if estimate_error:
if any_method_accept_distance_matrix:
distance_matrix = \
get_distance_matrix_bootstrap_samples(
distance_matrix,
ensemble_assignment,
samples=bootstrapping_samples,
ncores=ncores)
if not all_methods_accept_distance_matrix:
ensembles_list = []
for i, ensemble in enumerate(ensembles):
ensembles_list.append(
get_ensemble_bootstrap_samples(
ensemble,
samples=bootstrapping_samples))
            ensembles = []
            for j in range(bootstrapping_samples):
                # Materialise the list; appending a generator (and using
                # array-style indexing on a plain list) was a bug here.
                ensembles.append([ensembles_list[i][j] for i
                                  in range(len(ensembles_list))])
else:
# if all methods accept distances matrices, duplicate
# ensemble so that it matches size of distance matrices
# (no need to resample them since they will not be used)
ensembles = [ensembles] * bootstrapping_samples
# Call dimensionality reduction procedure
    coordinates, dim_red_details = reduce_dimensionality(
        ensembles,
        method=dimensionality_reduction_methods,
        selection=selection,
        distance_matrix=distance_matrix,
        ncores=ncores,
        allow_collapsed_result=False)
details = {}
details["reduced_coordinates"] = coordinates
details["dimensionality_reduction_details"] = details
if estimate_error:
k = 0
values = {}
avgs = []
stds = []
for i,method in enumerate(dimensionality_reduction_methods):
values[i] = []
for j in range(bootstrapping_samples):
values[i].append(np.zeros((len(ensembles[j]),
len(ensembles[j]))))
kdes, resamples, embedded_ensembles = gen_kde_pdfs(
coordinates[k],
ensemble_assignment,
len(ensembles[j]),
nsamples=nsamples)
for pair in pairs_indices:
this_value = dimred_ensemble_similarity(kdes[pair[0]],
resamples[pair[0]],
kdes[pair[1]],
resamples[pair[1]])
values[i][-1][pair[0], pair[1]] = this_value
values[i][-1][pair[1], pair[0]] = this_value
k += 1
outs = np.array(values[i])
avgs.append(np.average(outs, axis=0))
stds.append(np.std(outs, axis=0))
        if not hasattr(dimensionality_reduction_method, '__iter__'):
            avgs = avgs[0]
            stds = stds[0]
return avgs, stds
values = []
for i,method in enumerate(dimensionality_reduction_methods):
values.append(np.zeros((len(ensembles), len(ensembles))))
kdes, resamples, embedded_ensembles = gen_kde_pdfs(coordinates[i],
ensemble_assignment,
len(ensembles),
nsamples=nsamples)
for pair in pairs_indices:
this_value = dimred_ensemble_similarity(kdes[pair[0]],
resamples[pair[0]],
kdes[pair[1]],
resamples[pair[1]])
values[-1][pair[0], pair[1]] = this_value
values[-1][pair[1], pair[0]] = this_value
if allow_collapsed_result and not hasattr(dimensionality_reduction_method,
'__iter__'):
values = values[0]
return values, details
def ces_convergence(original_ensemble,
window_size,
selection="name CA",
clustering_method=AffinityPropagationNative(
preference=-1.0,
max_iter=500,
convergence_iter=50,
damping=0.9,
add_noise=True),
ncores=1):
"""
Use the CES to evaluate the convergence of the ensemble/trajectory.
CES will be calculated between the whole trajectory contained in an
ensemble and windows of such trajectory of increasing sizes, so that
the similarity values should gradually drop to zero. The rate at which
    the values reach zero is indicative of how much the trajectory
keeps on resampling the same regions of the conformational space, and
therefore of convergence.
Parameters
----------
original_ensemble : :class:`~MDAnalysis.core.universe.Universe` object
        Ensemble containing the trajectory whose convergence has to be
        estimated.
window_size : int
Size of window to be used, in number of frames
selection : str, optional
Atom selection string in the MDAnalysis format. Default is "name CA"
clustering_method : MDAnalysis.analysis.encore.clustering.ClusteringMethod
A single or a list of instances of the ClusteringMethod classes from
the clustering module. Different parameters for the same clustering
method can be explored by adding different instances of the same
clustering class.
ncores : int, optional
Maximum number of cores to be used (default is 1).
Returns
-------
out : np.array
array of shape (number_of_frames / window_size, preference_values).
Example
--------
To calculate the convergence of a trajectory using the clustering ensemble
similarity method a Universe object is created from a topology file and the
trajectory. The topology- and trajectory files used are obtained from the
MDAnalysis test suite for two different simulations of the protein AdK.
To run the examples see the module `Examples`_ for how to import the files.
Here the simplest case of evaluating the convergence is illustrated by
    splitting the trajectory using a window_size of 10 frames: ::
>>> ens1 = Universe(PSF,DCD)
>>> ces_conv = encore.ces_convergence(ens1, 10)
>>> print ces_conv
[[ 0.48194205]
[ 0.40284672]
[ 0.31699026]
[ 0.25220447]
[ 0.19829817]
[ 0.14642725]
[ 0.09911411]
[ 0.05667391]
[ 0. ]]
"""
ensembles = prepare_ensembles_for_convergence_increasing_window(
original_ensemble, window_size, selection=selection)
ccs = cluster(ensembles,
selection=selection,
method=clustering_method,
allow_collapsed_result=False,
ncores=ncores)
out = []
for cc in ccs:
if cc.clusters is None:
continue
out.append(np.zeros(len(ensembles)))
for j, ensemble in enumerate(ensembles):
out[-1][j] = cumulative_clustering_ensemble_similarity(
cc,
len(ensembles),
j + 1)
out = np.array(out).T
return out
def dres_convergence(original_ensemble,
window_size,
selection="name CA",
dimensionality_reduction_method = \
StochasticProximityEmbeddingNative(
dimension=3,
distance_cutoff=1.5,
min_lam=0.1,
max_lam=2.0,
ncycle=100,
nstep=10000
),
nsamples=1000,
ncores=1):
"""
Use the DRES to evaluate the convergence of the ensemble/trajectory.
DRES will be calculated between the whole trajectory contained in an
ensemble and windows of such trajectory of increasing sizes, so that
the similarity values should gradually drop to zero. The rate at which
    the values reach zero is indicative of how much the trajectory
    keeps on resampling the same areas of the conformational space, and
therefore of convergence.
Parameters
----------
original_ensemble : :class:`~MDAnalysis.core.universe.Universe` object
        Ensemble containing the trajectory whose convergence has to be
        estimated.
window_size : int
Size of window to be used, in number of frames
selection : str, optional
Atom selection string in the MDAnalysis format. Default is "name CA"
dimensionality_reduction_method :
A single or a list of instances of the DimensionalityReductionMethod
classes from the dimensionality_reduction module. Different parameters
for the same method can be explored by adding different instances of
the same dimensionality reduction class.
nsamples : int, optional
Number of samples to be drawn from the ensembles (default is 1000).
This is akin to the nsamples parameter of dres().
ncores : int, optional
Maximum number of cores to be used (default is 1).
Returns
-------
out : np.array
array of shape (number_of_frames / window_size, preference_values).
Example
--------
To calculate the convergence of a trajectory using the DRES
method, a Universe object is created from a topology file and the
trajectory. The topology- and trajectory files used are obtained from the
MDAnalysis test suite for two different simulations of the protein AdK.
To run the examples see the module `Examples`_ for how to import the files.
Here the simplest case of evaluating the convergence is illustrated by
    splitting the trajectory using a window_size of 10 frames: ::
>>> ens1 = Universe(PSF,DCD)
>>> dres_conv = encore.dres_convergence(ens1, 10)
>>> print dres_conv
[[ 0.5295528 ]
[ 0.40716539]
[ 0.31158669]
[ 0.25314041]
[ 0.20447271]
[ 0.13212364]
[ 0.06979114]
[ 0.05214759]
[ 0. ]]
Here, the rate at which the values reach zero will be indicative of how
    much the trajectory keeps on resampling the same areas of the conformational
space, and therefore of convergence.
"""
ensembles = prepare_ensembles_for_convergence_increasing_window(
original_ensemble, window_size, selection=selection)
coordinates, dimred_details = \
reduce_dimensionality(
ensembles,
selection=selection,
method=dimensionality_reduction_method,
allow_collapsed_result=False,
ncores=ncores)
ensemble_assignment = []
for i, ensemble in enumerate(ensembles):
ensemble_assignment += [i+1]*len(ensemble.trajectory)
ensemble_assignment = np.array(ensemble_assignment)
out = []
for i, _ in enumerate(coordinates):
out.append(np.zeros(len(ensembles)))
kdes, resamples, embedded_ensembles = \
cumulative_gen_kde_pdfs(
coordinates[i],
ensemble_assignment=ensemble_assignment,
nensembles=len(ensembles),
nsamples=nsamples)
for j, ensemble in enumerate(ensembles):
out[-1][j] = dimred_ensemble_similarity(kdes[-1],
resamples[-1],
kdes[j],
resamples[j])
out = np.array(out).T
return out
|
alejob/mdanalysis
|
package/MDAnalysis/analysis/encore/similarity.py
|
Python
|
gpl-2.0
| 64,668
|
[
"Gaussian",
"MDAnalysis"
] |
95604d9ccc625a9c05f568a3379fcab9d74852f046d13624a9ddecffed1e66a1
|
# -*- coding: utf-8 -*-
###############################################################################
# This source file is part of the Tomviz project, https://tomviz.org/.
# It is released under the 3-Clause BSD License, see "LICENSE".
###############################################################################
from abc import ABCMeta, abstractmethod
class FileType(object):
"""
Container class for file type information.
"""
def __init__(self, display_name=None, extensions=None):
self.display_name = display_name
self.extensions = extensions
def __str__(self):
return "%s (%s)" % (self.display_name,
" ".join(["*."+ext for ext in self.extensions]))
class IOBase(object, metaclass=ABCMeta):
@staticmethod
@abstractmethod
def file_type():
"""
        :returns An instance of the FileType class. This is used to associate
a file type with a reader.
:rtype tomviz.io.FileType
"""
class Reader(IOBase, metaclass=ABCMeta):
"""
The base reader class from which readers should be derived.
"""
"""
Set to True if reader supports loading image stacks.
"""
supports_stacks = False
@abstractmethod
def read(self, file):
"""
:returns Return a vtkDataObject containing the scalars read.
:param file: the path or file object to read from
:rtype vtk.vtkDataObject
"""
class Writer(IOBase, metaclass=ABCMeta):
"""
    The base writer class from which writers should be derived.
"""
@abstractmethod
def write(self, file, data):
"""
:param file: the path or file object to write to
:param data: The data to write to the file.
:type data: The vtkDataObject instance.
"""
|
OpenChemistry/tomviz
|
tomviz/python/tomviz/io/__init__.py
|
Python
|
bsd-3-clause
| 1,825
|
[
"VTK"
] |
23dfb2dd79ef714c6f6f25c8f6bb3429fcb48599f0018776c36a1b0442a1ac7e
|
# shieldBoostAmplifierPassive
#
# Used by:
# Implants named like: grade Crystal (15 of 18)
type = "passive"
def handler(fit, container, context):
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Shield Operation"),
"shieldBonus", container.getModifiedItemAttr("shieldBoostMultiplier"))
|
Ebag333/Pyfa
|
eos/effects/shieldboostamplifierpassive.py
|
Python
|
gpl-3.0
| 343
|
[
"CRYSTAL"
] |
09193185ba4870ec4af6a9abdbed696eff854a60a53cecb960286f4a735d413e
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Classes for reading/manipulating/writing VASP input files. All major VASP
input files are covered.
"""
import os
import re
import itertools
import warnings
import logging
import math
import json
import glob
import subprocess
import numpy as np
from numpy.linalg import det
from collections import OrderedDict, namedtuple
from hashlib import md5
from monty.io import zopen
from monty.os.path import zpath
from monty.json import MontyDecoder
from monty.os import cd
from monty.serialization import loadfn
from enum import Enum
from tabulate import tabulate
import scipy.constants as const
from pymatgen import SETTINGS, __version__
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.periodic_table import Element, get_el_sp
from pymatgen.electronic_structure.core import Magmom
from pymatgen.util.string import str_delimited
from pymatgen.util.io_utils import clean_lines
from pymatgen.util.typing import PathLike
from monty.json import MSONable
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Rickard Armiento, Vincent L Chevrier, Stephen Dacek"
__copyright__ = "Copyright 2011, The Materials Project"
logger = logging.getLogger(__name__)
class Poscar(MSONable):
"""
Object for representing the data in a POSCAR or CONTCAR file.
Please note that, in the current implementation, most attributes can be
set directly.
.. attribute:: structure
Associated Structure.
.. attribute:: comment
Optional comment string.
.. attribute:: true_names
Boolean indication whether Poscar contains actual real names parsed
from either a POTCAR or the POSCAR itself.
.. attribute:: selective_dynamics
Selective dynamics attribute for each site if available. A Nx3 array of
booleans.
.. attribute:: velocities
Velocities for each site (typically read in from a CONTCAR). A Nx3
array of floats.
.. attribute:: predictor_corrector
Predictor corrector coordinates and derivatives for each site; i.e.
a list of three 1x3 arrays for each site (typically read in from a MD
CONTCAR).
.. attribute:: predictor_corrector_preamble
Predictor corrector preamble contains the predictor-corrector key,
POTIM, and thermostat parameters that precede the site-specific predictor
corrector data in an MD CONTCAR.
.. attribute:: temperature
Temperature of velocity Maxwell-Boltzmann initialization. Initialized
to -1 (MB hasn't been performed).
"""
def __init__(
self,
structure: Structure,
comment: str = None,
selective_dynamics=None,
true_names: bool = True,
velocities=None,
predictor_corrector=None,
predictor_corrector_preamble=None,
sort_structure: bool = False,
):
"""
:param structure: Structure object.
:param comment: Optional comment line for POSCAR. Defaults to unit
cell formula of structure. Defaults to None.
:param selective_dynamics: Nx3 array of bool values for selective
dynamics, where N is the number of sites. Defaults to None.
:param true_names: Set to False if the names in the POSCAR are not
well-defined and ambiguous. This situation arises commonly in
vasp < 5 where the POSCAR sometimes does not contain element
symbols. Defaults to True.
:param velocities: Velocities for the POSCAR. Typically parsed
in MD runs or can be used to initialize velocities.
:param predictor_corrector: Predictor corrector for the POSCAR.
Typically parsed in MD runs.
:param predictor_corrector_preamble: Preamble to the predictor
corrector.
:param sort_structure: Whether to sort structure. Useful if species
are not grouped properly together.
"""
if structure.is_ordered:
site_properties = {}
if selective_dynamics:
site_properties["selective_dynamics"] = selective_dynamics
if velocities:
site_properties["velocities"] = velocities
if predictor_corrector:
site_properties["predictor_corrector"] = predictor_corrector
structure = Structure.from_sites(structure)
self.structure = structure.copy(site_properties=site_properties)
if sort_structure:
self.structure = self.structure.get_sorted_structure()
self.true_names = true_names
self.comment = structure.formula if comment is None else comment
self.predictor_corrector_preamble = predictor_corrector_preamble
else:
raise ValueError(
"Structure with partial occupancies cannot be " "converted into POSCAR!"
)
self.temperature = -1
@property
def velocities(self):
"""Velocities in Poscar"""
return self.structure.site_properties.get("velocities")
@property
def selective_dynamics(self):
"""Selective dynamics in Poscar"""
return self.structure.site_properties.get("selective_dynamics")
@property
def predictor_corrector(self):
"""Predictor corrector in Poscar"""
return self.structure.site_properties.get("predictor_corrector")
@velocities.setter # type: ignore
def velocities(self, velocities):
"""Setter for Poscar.velocities"""
self.structure.add_site_property("velocities", velocities)
@selective_dynamics.setter # type: ignore
def selective_dynamics(self, selective_dynamics):
"""Setter for Poscar.selective_dynamics"""
self.structure.add_site_property("selective_dynamics", selective_dynamics)
@predictor_corrector.setter # type: ignore
def predictor_corrector(self, predictor_corrector):
"""Setter for Poscar.predictor_corrector"""
self.structure.add_site_property("predictor_corrector", predictor_corrector)
@property
def site_symbols(self):
"""
Sequence of symbols associated with the Poscar. Similar to 6th line in
vasp 5+ POSCAR.
"""
syms = [site.specie.symbol for site in self.structure]
return [a[0] for a in itertools.groupby(syms)]
@property
def natoms(self):
"""
Sequence of number of sites of each type associated with the Poscar.
Similar to 7th line in vasp 5+ POSCAR or the 6th line in vasp 4 POSCAR.
"""
syms = [site.specie.symbol for site in self.structure]
return [len(tuple(a[1])) for a in itertools.groupby(syms)]
def __setattr__(self, name, value):
if name in ("selective_dynamics", "velocities"):
if value is not None and len(value) > 0:
value = np.array(value)
dim = value.shape
if dim[1] != 3 or dim[0] != len(self.structure):
raise ValueError(
name + " array must be same length as" + " the structure."
)
value = value.tolist()
super().__setattr__(name, value)
@staticmethod
def from_file(filename, check_for_POTCAR=True, read_velocities=True):
"""
Reads a Poscar from a file.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If check_for_POTCAR is True, the code will try to check if a POTCAR
is in the same directory as the POSCAR and use elements from that by
default. (This is the VASP default sequence of priority).
2. If the input file is Vasp5-like and contains element symbols in the
6th line, the code will use that if check_for_POTCAR is False or there
is no POTCAR found.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
filename (str): File name containing Poscar data.
check_for_POTCAR (bool): Whether to check if a POTCAR is present
in the same directory as the POSCAR. Defaults to True.
read_velocities (bool): Whether to read or not velocities if they
are present in the POSCAR. Default is True.
Returns:
Poscar object.
"""
dirname = os.path.dirname(os.path.abspath(filename))
names = None
if check_for_POTCAR:
potcars = glob.glob(os.path.join(dirname, "*POTCAR*"))
if potcars:
try:
potcar = Potcar.from_file(sorted(potcars)[0])
names = [sym.split("_")[0] for sym in potcar.symbols]
[get_el_sp(n) for n in names] # ensure valid names
except Exception:
names = None
with zopen(filename, "rt") as f:
return Poscar.from_string(f.read(), names, read_velocities=read_velocities)
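# Usage sketch (illustrative; the file name is a placeholder):
#
#     poscar = Poscar.from_file("POSCAR")
#     print(poscar.structure.formula)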
@staticmethod
def from_string(data, default_names=None, read_velocities=True):
"""
Reads a Poscar from a string.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If default_names are supplied and valid, it will use those. Usually,
default names comes from an external source, such as a POTCAR in the
same directory.
2. If there are no valid default names but the input file is Vasp5-like
and contains element symbols in the 6th line, the code will use that.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
data (str): String containing Poscar data.
default_names ([str]): Default symbols for the POSCAR file,
usually coming from a POTCAR in the same directory.
read_velocities (bool): Whether to read or not velocities if they
are present in the POSCAR. Default is True.
Returns:
Poscar object.
"""
# "^\s*$" doesn't match lines with no whitespace
chunks = re.split(r"\n\s*\n", data.rstrip(), flags=re.MULTILINE)
try:
if chunks[0] == "":
chunks.pop(0)
chunks[0] = "\n" + chunks[0]
except IndexError:
raise ValueError("Empty POSCAR")
# Parse positions
lines = tuple(clean_lines(chunks[0].split("\n"), False))
comment = lines[0]
scale = float(lines[1])
lattice = np.array([[float(i) for i in line.split()] for line in lines[2:5]])
if scale < 0:
# In vasp, a negative scale factor is treated as a volume. We need
# to translate this to a proper lattice vector scaling.
vol = abs(det(lattice))
lattice *= (-scale / vol) ** (1 / 3)
else:
lattice *= scale
vasp5_symbols = False
try:
natoms = [int(i) for i in lines[5].split()]
ipos = 6
except ValueError:
vasp5_symbols = True
symbols = lines[5].split()
"""
Atoms and number of atoms in POSCAR written with vasp appear on
multiple lines when atoms of the same type are not grouped together
and more than 20 groups are then defined ...
Example :
Cr16 Fe35 Ni2
1.00000000000000
8.5415010000000002 -0.0077670000000000 -0.0007960000000000
-0.0077730000000000 8.5224019999999996 0.0105580000000000
-0.0007970000000000 0.0105720000000000 8.5356889999999996
Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Ni Fe Cr Fe Cr
Fe Ni Fe Cr Fe
1 1 2 4 2 1 1 1 2 1 1 1 4 1 1 1 5 3 6 1
2 1 3 2 5
Direct
...
"""
nlines_symbols = 1
for nlines_symbols in range(1, 11):
try:
int(lines[5 + nlines_symbols].split()[0])
break
except ValueError:
pass
for iline_symbols in range(6, 5 + nlines_symbols):
symbols.extend(lines[iline_symbols].split())
natoms = []
iline_natoms_start = 5 + nlines_symbols
for iline_natoms in range(
iline_natoms_start, iline_natoms_start + nlines_symbols
):
natoms.extend([int(i) for i in lines[iline_natoms].split()])
atomic_symbols = list()
for i in range(len(natoms)):
atomic_symbols.extend([symbols[i]] * natoms[i])
ipos = 5 + 2 * nlines_symbols
postype = lines[ipos].split()[0]
sdynamics = False
# Selective dynamics
if postype[0] in "sS":
sdynamics = True
ipos += 1
postype = lines[ipos].split()[0]
cart = postype[0] in "cCkK"
nsites = sum(natoms)
# If default_names is specified (usually coming from a POTCAR), use
# them. This is in line with Vasp"s parsing order that the POTCAR
# specified is the default used.
if default_names:
try:
atomic_symbols = []
for i in range(len(natoms)):
atomic_symbols.extend([default_names[i]] * natoms[i])
vasp5_symbols = True
except IndexError:
pass
if not vasp5_symbols:
ind = 3 if not sdynamics else 6
try:
# Check if names are appended at the end of the coordinates.
atomic_symbols = [
l.split()[ind] for l in lines[ipos + 1: ipos + 1 + nsites]
]
# Ensure symbols are valid elements
if not all([Element.is_valid_symbol(sym) for sym in atomic_symbols]):
raise ValueError("Non-valid symbols detected.")
vasp5_symbols = True
except (ValueError, IndexError):
# Defaulting to false names.
atomic_symbols = []
for i in range(len(natoms)):
sym = Element.from_Z(i + 1).symbol
atomic_symbols.extend([sym] * natoms[i])
warnings.warn(
"Elements in POSCAR cannot be determined. "
"Defaulting to false names %s." % " ".join(atomic_symbols)
)
# read the atomic coordinates
coords = []
selective_dynamics = list() if sdynamics else None
for i in range(nsites):
toks = lines[ipos + 1 + i].split()
crd_scale = scale if cart else 1
coords.append([float(j) * crd_scale for j in toks[:3]])
if sdynamics:
selective_dynamics.append([tok.upper()[0] == "T" for tok in toks[3:6]])
struct = Structure(
lattice,
atomic_symbols,
coords,
to_unit_cell=False,
validate_proximity=False,
coords_are_cartesian=cart,
)
if read_velocities:
# Parse velocities if any
velocities = []
if len(chunks) > 1:
for line in chunks[1].strip().split("\n"):
velocities.append([float(tok) for tok in line.split()])
# Parse the predictor-corrector data
predictor_corrector = []
predictor_corrector_preamble = None
if len(chunks) > 2:
lines = chunks[2].strip().split("\n")
# There are 3 sets of 3xN Predictor corrector parameters
# So can't be stored as a single set of "site_property"
# First line in chunk is a key in CONTCAR
# Second line is POTIM
# Third line is the thermostat parameters
predictor_corrector_preamble = (
lines[0] + "\n" + lines[1] + "\n" + lines[2]
)
# Rest is three sets of parameters, each set contains
# x, y, z predictor-corrector parameters for every atom in order
lines = lines[3:]
for st in range(nsites):
d1 = [float(tok) for tok in lines[st].split()]
d2 = [float(tok) for tok in lines[st + nsites].split()]
d3 = [float(tok) for tok in lines[st + 2 * nsites].split()]
predictor_corrector.append([d1, d2, d3])
else:
velocities = None
predictor_corrector = None
predictor_corrector_preamble = None
return Poscar(
struct,
comment,
selective_dynamics,
vasp5_symbols,
velocities=velocities,
predictor_corrector=predictor_corrector,
predictor_corrector_preamble=predictor_corrector_preamble,
)
def get_string(self, direct=True, vasp4_compatible=False, significant_figures=6):
"""
Returns a string to be written as a POSCAR file. By default, site
symbols are written, which means compatibility is for vasp >= 5.
Args:
direct (bool): Whether coordinates are output in direct or
cartesian. Defaults to True.
vasp4_compatible (bool): Set to True to omit site symbols on 6th
line to maintain backward vasp 4.x compatibility. Defaults
to False.
significant_figures (int): No. of significant figures to
output all quantities. Defaults to 6. Note that positions are
output in fixed point, while velocities are output in
scientific format.
Returns:
String representation of POSCAR.
"""
# This corrects for VASP's really annoying bug of crashing on lattices
# which have a triple product < 0. We will just invert the lattice
# vectors.
latt = self.structure.lattice
if np.linalg.det(latt.matrix) < 0:
latt = Lattice(-latt.matrix)
format_str = "{{:.{0}f}}".format(significant_figures)
lines = [self.comment, "1.0"]
for v in latt.matrix:
lines.append(" ".join([format_str.format(c) for c in v]))
if self.true_names and not vasp4_compatible:
lines.append(" ".join(self.site_symbols))
lines.append(" ".join([str(x) for x in self.natoms]))
if self.selective_dynamics:
lines.append("Selective dynamics")
lines.append("direct" if direct else "cartesian")
selective_dynamics = self.selective_dynamics
for (i, site) in enumerate(self.structure):
coords = site.frac_coords if direct else site.coords
line = " ".join([format_str.format(c) for c in coords])
if selective_dynamics is not None:
sd = ["T" if j else "F" for j in selective_dynamics[i]]
line += " %s %s %s" % (sd[0], sd[1], sd[2])
line += " " + site.species_string
lines.append(line)
if self.velocities:
try:
lines.append("")
for v in self.velocities:
lines.append(" ".join([format_str.format(i) for i in v]))
except Exception:
warnings.warn("Velocities are missing or corrupted.")
if self.predictor_corrector:
lines.append("")
if self.predictor_corrector_preamble:
lines.append(self.predictor_corrector_preamble)
pred = np.array(self.predictor_corrector)
for col in range(3):
for z in pred[:, col]:
lines.append(" ".join([format_str.format(i) for i in z]))
else:
warnings.warn(
"Preamble information missing or corrupt. "
"Writing Poscar with no predictor corrector data."
)
return "\n".join(lines) + "\n"
def __repr__(self):
return self.get_string()
def __str__(self):
"""
String representation of Poscar file.
"""
return self.get_string()
def write_file(self, filename, **kwargs):
"""
Writes POSCAR to a file. The supported kwargs are the same as those for
the Poscar.get_string method and are passed through directly.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
def as_dict(self):
"""
:return: MSONable dict.
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"true_names": self.true_names,
"selective_dynamics": np.array(self.selective_dynamics).tolist(),
"velocities": self.velocities,
"predictor_corrector": self.predictor_corrector,
"comment": self.comment,
}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation.
:return: Poscar
"""
return Poscar(
Structure.from_dict(d["structure"]),
comment=d["comment"],
selective_dynamics=d["selective_dynamics"],
true_names=d["true_names"],
velocities=d.get("velocities", None),
predictor_corrector=d.get("predictor_corrector", None),
)
def set_temperature(self, temperature):
"""
Initializes the velocities based on Maxwell-Boltzmann distribution.
Removes linear, but not angular drift (same as VASP)
Scales the energies to the exact temperature (microcanonical ensemble)
Velocities are given in A/fs. This is the vasp default when
direct/cartesian is not specified (even when positions are given in
direct coordinates)
Overwrites imported velocities, if any.
Args:
temperature (float): Temperature in Kelvin.
"""
# mean 0 variance 1
velocities = np.random.randn(len(self.structure), 3)
# in AMU, (N,1) array
atomic_masses = np.array(
[site.specie.atomic_mass.to("kg") for site in self.structure]
)
dof = 3 * len(self.structure) - 3
# scale velocities due to atomic masses
# mean 0 std proportional to sqrt(1/m)
velocities /= atomic_masses[:, np.newaxis] ** (1 / 2)
# remove linear drift (net momentum)
velocities -= np.average(
atomic_masses[:, np.newaxis] * velocities, axis=0
) / np.average(atomic_masses)
# scale velocities to get correct temperature
energy = np.sum(1 / 2 * atomic_masses * np.sum(velocities ** 2, axis=1))
scale = (temperature * dof / (2 * energy / const.k)) ** (1 / 2)
velocities *= scale * 1e-5 # these are in A/fs
self.temperature = temperature
try:
del self.structure.site_properties["selective_dynamics"]
except KeyError:
pass
try:
del self.structure.site_properties["predictor_corrector"]
except KeyError:
pass
# returns as a list of lists to be consistent with the other
# initializations
self.structure.add_site_property("velocities", velocities.tolist())
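# Usage sketch (illustrative; file names are placeholders): seed
# Maxwell-Boltzmann velocities at 300 K for an MD starting structure:
#
#     poscar = Poscar.from_file("POSCAR")
#     poscar.set_temperature(300)
#     poscar.write_file("POSCAR.md_start")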
cwd = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(cwd, "incar_parameters.json")) as incar_params:
incar_params = json.loads(incar_params.read())
class BadIncarWarning(UserWarning):
"""
Warning class for bad Incar parameters.
"""
pass
class Incar(dict, MSONable):
"""
INCAR object for reading and writing INCAR files. Essentially consists of
a dictionary with some helper functions
"""
def __init__(self, params=None):
"""
Creates an Incar object.
Args:
params (dict): A set of input parameters as a dictionary.
"""
super().__init__()
if params:
# if Incar contains vector-like magmoms given as a list
# of floats, convert to a list of lists
if (
params.get("MAGMOM") and isinstance(params["MAGMOM"][0], (int, float))
) and (params.get("LSORBIT") or params.get("LNONCOLLINEAR")):
val = []
for i in range(len(params["MAGMOM"]) // 3):
val.append(params["MAGMOM"][i * 3: (i + 1) * 3])
params["MAGMOM"] = val
self.update(params)
def __setitem__(self, key, val):
"""
Add parameter-val pair to Incar. Warns if parameter is not in list of
valid INCAR tags. Also cleans the parameter and val by stripping
leading and trailing white spaces.
"""
super().__setitem__(
key.strip(),
Incar.proc_val(key.strip(), val.strip()) if isinstance(val, str) else val,
)
def as_dict(self):
"""
:return: MSONable dict.
"""
d = dict(self)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation.
:return: Incar
"""
if d.get("MAGMOM") and isinstance(d["MAGMOM"][0], dict):
d["MAGMOM"] = [Magmom.from_dict(m) for m in d["MAGMOM"]]
return Incar({k: v for k, v in d.items() if k not in ("@module", "@class")})
def get_string(self, sort_keys=False, pretty=False):
"""
Returns a string representation of the INCAR. The reason why this
method is different from the __str__ method is to provide options for
pretty printing.
Args:
sort_keys (bool): Set to True to sort the INCAR parameters
alphabetically. Defaults to False.
pretty (bool): Set to True for pretty aligned output. Defaults
to False.
"""
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if k == "MAGMOM" and isinstance(self[k], list):
value = []
if (
isinstance(self[k][0], list) or isinstance(self[k][0], Magmom)
) and (self.get("LSORBIT") or self.get("LNONCOLLINEAR")):
value.append(" ".join(str(i) for j in self[k] for i in j))
elif self.get("LSORBIT") or self.get("LNONCOLLINEAR"):
for m, g in itertools.groupby(self[k]):
value.append("3*{}*{}".format(len(tuple(g)), m))
else:
# float() to ensure backwards compatibility between
# float magmoms and Magmom objects
for m, g in itertools.groupby(self[k], lambda x: float(x)):
value.append("{}*{}".format(len(tuple(g)), m))
lines.append([k, " ".join(value)])
elif isinstance(self[k], list):
lines.append([k, " ".join([str(i) for i in self[k]])])
else:
lines.append([k, self[k]])
if pretty:
return str(tabulate([[l[0], "=", l[1]] for l in lines], tablefmt="plain"))
else:
return str_delimited(lines, None, " = ") + "\n"
def __str__(self):
return self.get_string(sort_keys=True, pretty=False)
def write_file(self, filename):
"""
Write Incar to a file.
Args:
filename (str): filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename):
"""
Reads an Incar object from a file.
Args:
filename (str): Filename for file
Returns:
Incar object
"""
with zopen(filename, "rt") as f:
return Incar.from_string(f.read())
@staticmethod
def from_string(string):
"""
Reads an Incar object from a string.
Args:
string (str): Incar string
Returns:
Incar object
"""
lines = list(clean_lines(string.splitlines()))
params = {}
for line in lines:
for sline in line.split(";"):
m = re.match(r"(\w+)\s*=\s*(.*)", sline.strip())
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Incar.proc_val(key, val)
params[key] = val
return Incar(params)
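# Usage sketch (illustrative): both ";"-separated and line-separated tags
# are parsed, with values coerced by proc_val below:
#
#     inc = Incar.from_string("ALGO = Fast; EDIFF = 1e-6\nISMEAR = 0")
#     assert inc["EDIFF"] == 1e-06 and inc["ISMEAR"] == 0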
@staticmethod
def proc_val(key, val):
"""
Static helper method to convert INCAR parameters to proper types, e.g.,
integers, floats, lists, etc.
Args:
key: INCAR parameter key
val: Actual value of INCAR parameter.
"""
list_keys = (
"LDAUU",
"LDAUL",
"LDAUJ",
"MAGMOM",
"DIPOL",
"LANGEVIN_GAMMA",
"QUAD_EFG",
"EINT",
)
bool_keys = (
"LDAU",
"LWAVE",
"LSCALU",
"LCHARG",
"LPLANE",
"LUSE_VDW",
"LHFCALC",
"ADDGRID",
"LSORBIT",
"LNONCOLLINEAR",
)
float_keys = (
"EDIFF",
"SIGMA",
"TIME",
"ENCUTFOCK",
"HFSCREEN",
"POTIM",
"EDIFFG",
"AGGAC",
"PARAM1",
"PARAM2",
)
int_keys = (
"NSW",
"NBANDS",
"NELMIN",
"ISIF",
"IBRION",
"ISPIN",
"ICHARG",
"NELM",
"ISMEAR",
"NPAR",
"LDAUPRINT",
"LMAXMIX",
"ENCUT",
"NSIM",
"NKRED",
"NUPDOWN",
"ISPIND",
"LDAUTYPE",
"IVDW",
)
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key in list_keys:
output = []
toks = re.findall(
r"(-?\d+\.?\d*)\*?(-?\d+\.?\d*)?\*?(-?\d+\.?\d*)?", val
)
for tok in toks:
if tok[2] and "3" in tok[0]:
output.extend(
[smart_int_or_float(tok[2])] * int(tok[0]) * int(tok[1])
)
elif tok[1]:
output.extend([smart_int_or_float(tok[1])] * int(tok[0]))
else:
output.append(smart_int_or_float(tok[0]))
return output
if key in bool_keys:
m = re.match(r"^\.?([TFtf])[A-Za-z]*\.?", val)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(key + " should be a boolean type!")
if key in float_keys:
return float(re.search(r"^-?\d*\.?\d*[eE]?-?\d*", val).group(0))
if key in int_keys:
return int(re.match(r"^-?[0-9]+", val).group(0))
except ValueError:
pass
# Not in standard keys. We will try a hierarchy of conversions.
try:
val = int(val)
return val
except ValueError:
pass
try:
val = float(val)
return val
except ValueError:
pass
if "true" in val.lower():
return True
if "false" in val.lower():
return False
return val.strip().capitalize()
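# Illustrative conversions performed by proc_val:
#
#     Incar.proc_val("MAGMOM", "2*5 2*-5")  ->  [5, 5, -5, -5]
#     Incar.proc_val("LDAU", ".TRUE.")      ->  True
#     Incar.proc_val("EDIFF", "1e-6")       ->  1e-06
#     Incar.proc_val("ALGO", "fast")        ->  "Fast"  (fallback branch)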
def diff(self, other):
"""
Diff function for Incar. Compares two Incars and indicates which
parameters are the same and which are not. Useful for checking whether
two runs were done using the same parameters.
Args:
other (Incar): The other Incar object to compare to.
Returns:
Dict of the following format:
{"Same" : parameters_that_are_the_same,
"Different": parameters_that_are_different}
Note that the parameters are return as full dictionaries of values.
E.g. {"ISIF":3}
"""
similar_param = {}
different_param = {}
for k1, v1 in self.items():
if k1 not in other:
different_param[k1] = {"INCAR1": v1, "INCAR2": None}
elif v1 != other[k1]:
different_param[k1] = {"INCAR1": v1, "INCAR2": other[k1]}
else:
similar_param[k1] = v1
for k2, v2 in other.items():
if k2 not in similar_param and k2 not in different_param:
if k2 not in self:
different_param[k2] = {"INCAR1": None, "INCAR2": v2}
return {"Same": similar_param, "Different": different_param}
def __add__(self, other):
"""
Add all the values of another INCAR object to this object.
Facilitates the use of "standard" INCARs.
"""
params = {k: v for k, v in self.items()}
for k, v in other.items():
if k in self and v != self[k]:
raise ValueError("Incars have conflicting values!")
else:
params[k] = v
return Incar(params)
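# Usage sketch (illustrative): layer run-specific tags onto a "standard"
# base INCAR; conflicting values raise ValueError:
#
#     base = Incar({"EDIFF": 1e-6, "ISMEAR": 0})
#     run = base + Incar({"NSW": 99, "IBRION": 2})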
def check_params(self):
"""
Raises a warning for nonsensical or non-existent INCAR tags and
parameters. If a keyword doesn't exist (e.g. there's a typo in a
keyword), your calculation will still run, but VASP will ignore the
parameter without letting you know, hence this Incar method.
"""
for k in self.keys():
# First check if this parameter even exists
if k not in incar_params.keys():
warnings.warn(
"Cannot find %s in the list of INCAR flags" % (k),
BadIncarWarning,
stacklevel=2,
)
if k in incar_params.keys():
if type(incar_params[k]).__name__ == "str":
# Now we check if this is an appropriate parameter type
if incar_params[k] == "float":
if type(self[k]).__name__ not in ["float", "int"]:
warnings.warn(
"%s: %s is not real" % (k, self[k]),
BadIncarWarning,
stacklevel=2,
)
elif type(self[k]).__name__ != incar_params[k]:
warnings.warn(
"%s: %s is not a %s" % (k, self[k], incar_params[k]),
BadIncarWarning,
stacklevel=2,
)
# if we have a list of possible parameters, check
# if the user given parameter is in this list
elif type(incar_params[k]).__name__ == "list":
if self[k] not in incar_params[k]:
warnings.warn(
"%s: Cannot find %s in the list of parameters"
% (k, self[k]),
BadIncarWarning,
stacklevel=2,
)
class Kpoints_supported_modes(Enum):
"""
Enum type of all supported modes for Kpoint generation.
"""
Automatic = 0
Gamma = 1
Monkhorst = 2
Line_mode = 3
Cartesian = 4
Reciprocal = 5
def __str__(self):
return self.name
@staticmethod
def from_string(s: str) -> "Kpoints_supported_modes":
"""
:param s: String
:return: Kpoints_supported_modes
"""
c = s.lower()[0]
for m in Kpoints_supported_modes:
if m.name.lower()[0] == c:
return m
raise ValueError("Can't interprete Kpoint mode %s" % s)
class Kpoints(MSONable):
"""
KPOINT reader/writer.
"""
supported_modes = Kpoints_supported_modes
def __init__(
self,
comment="Default gamma",
num_kpts=0,
style=supported_modes.Gamma,
kpts=((1, 1, 1),),
kpts_shift=(0, 0, 0),
kpts_weights=None,
coord_type=None,
labels=None,
tet_number=0,
tet_weight=0,
tet_connections=None,
):
"""
Highly flexible constructor for Kpoints object. The flexibility comes
at the cost of usability and in general, it is recommended that you use
the default constructor only if you know exactly what you are doing and
require the flexibility. For most usage cases, the three automatic
schemes can be constructed far more easily using the convenience static
constructors (automatic, gamma_automatic, monkhorst_automatic) and it
is recommended that you use those.
Args:
comment (str): String comment for Kpoints
num_kpts: Following VASP method of defining the KPOINTS file, this
parameter is the number of kpoints specified. If set to 0
(or negative), VASP automatically generates the KPOINTS.
style: Style for generating KPOINTS. Use one of the
Kpoints.supported_modes enum types.
kpts (2D array): 2D array of kpoints. Even when only a single
specification is required, e.g. in the automatic scheme,
the kpts should still be specified as a 2D array. e.g.,
[[20]] or [[2,2,2]].
kpts_shift (3x1 array): Shift for Kpoints.
kpts_weights: Optional weights for kpoints. Weights should be
integers. For explicit kpoints.
coord_type: In line-mode, this variable specifies whether the
Kpoints were given in Cartesian or Reciprocal coordinates.
labels: In line-mode, this should provide a list of labels for
each kpt. It is optional in explicit kpoint mode as comments for
k-points.
tet_number: For explicit kpoints, specifies the number of
tetrahedrons for the tetrahedron method.
tet_weight: For explicit kpoints, specifies the weight for each
tetrahedron for the tetrahedron method.
tet_connections: For explicit kpoints, specifies the connections
of the tetrahedrons for the tetrahedron method.
Format is a list of tuples, [ (sym_weight, [tet_vertices]),
...]
The default behavior of the constructor is for a Gamma centered,
1x1x1 KPOINTS with no shift.
"""
if num_kpts > 0 and (not labels) and (not kpts_weights):
raise ValueError(
"For explicit or line-mode kpoints, either the "
"labels or kpts_weights must be specified."
)
self.comment = comment
self.num_kpts = num_kpts
self.kpts = kpts
self.style = style
self.coord_type = coord_type
self.kpts_weights = kpts_weights
self.kpts_shift = kpts_shift
self.labels = labels
self.tet_number = tet_number
self.tet_weight = tet_weight
self.tet_connections = tet_connections
@property
def style(self):
"""
:return: Style for kpoint generation. One of Kpoints_supported_modes
enum.
"""
return self._style
@style.setter
def style(self, style):
"""
:param style: Style
:return: Sets the style for the Kpoints. One of Kpoints_supported_modes
enum.
"""
if isinstance(style, str):
style = Kpoints.supported_modes.from_string(style)
if (
style
in (
Kpoints.supported_modes.Automatic,
Kpoints.supported_modes.Gamma,
Kpoints.supported_modes.Monkhorst,
)
and len(self.kpts) > 1
):
raise ValueError(
"For fully automatic or automatic gamma or monk "
"kpoints, only a single line for the number of "
"divisions is allowed."
)
self._style = style
@staticmethod
def automatic(subdivisions):
"""
Convenient static constructor for a fully automatic Kpoint grid, with
gamma centered Monkhorst-Pack grids and the number of subdivisions
along each reciprocal lattice vector determined by the scheme in the
VASP manual.
Args:
subdivisions: Parameter determining number of subdivisions along
each reciprocal lattice vector.
Returns:
Kpoints object
"""
return Kpoints(
"Fully automatic kpoint scheme",
0,
style=Kpoints.supported_modes.Automatic,
kpts=[[subdivisions]],
)
@staticmethod
def gamma_automatic(kpts=(1, 1, 1), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Gamma centered Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (1,1,1)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints(
"Automatic kpoint scheme",
0,
Kpoints.supported_modes.Gamma,
kpts=[kpts],
kpts_shift=shift,
)
@staticmethod
def monkhorst_automatic(kpts=(2, 2, 2), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Monkhorst pack Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (2,2,2)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints(
"Automatic kpoint scheme",
0,
Kpoints.supported_modes.Monkhorst,
kpts=[kpts],
kpts_shift=shift,
)
@staticmethod
def automatic_density(structure, kppa, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes for hexagonal cells and
Monkhorst-Pack grids otherwise.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure (Structure): Input structure
kppa (int): Grid density
force_gamma (bool): Force a gamma centered mesh (default is to
use gamma only for hexagonal cells or odd meshes)
Returns:
Kpoints
"""
comment = "pymatgen v%s with grid density = %.0f / atom" % (__version__, kppa)
if math.fabs((math.floor(kppa ** (1 / 3) + 0.5)) ** 3 - kppa) < 1:
kppa += kppa * 0.01
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(math.floor(max(mult / l, 1))) for l in lengths]
is_hexagonal = latt.is_hexagonal()
has_odd = any([i % 2 == 1 for i in num_div])
if has_odd or is_hexagonal or force_gamma:
style = Kpoints.supported_modes.Gamma
else:
style = Kpoints.supported_modes.Monkhorst
return Kpoints(comment, 0, style, [num_div], [0, 0, 0])
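# Worked example (illustrative): a 1-atom cubic cell with a = 3 A and
# kppa = 1000. Since 1000 is a perfect cube, kppa is first bumped by 1%
# to 1010 to avoid ties; mult = (1010 * 27) ** (1 / 3) ~ 30.1 and
# num_div = [10, 10, 10]. All divisions are even and the cell is not
# hexagonal, so a Monkhorst-Pack grid results unless force_gamma=True.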
@staticmethod
def automatic_gamma_density(structure, kppa):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes always. For GW.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure:
Input structure
kppa:
Grid density
"""
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(round(mult / l)) for l in lengths]
# ensure that numDiv[i] > 0
num_div = [i if i > 0 else 1 for i in num_div]
# VASP documentation recommends to use even grids for n <= 8 and odd
# grids for n > 8.
num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div]
style = Kpoints.supported_modes.Gamma
comment = (
"pymatgen 4.7.6+ generated KPOINTS with grid density = "
+ "{} / atom".format(kppa)
)
num_kpts = 0
return Kpoints(comment, num_kpts, style, [num_div], [0, 0, 0])
@staticmethod
def automatic_density_by_vol(structure, kppvol, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom^3 of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
Returns:
Kpoints
"""
vol = structure.lattice.reciprocal_lattice.volume
kppa = kppvol * vol * structure.num_sites
return Kpoints.automatic_density(structure, kppa, force_gamma=force_gamma)
@staticmethod
def automatic_linemode(divisions, ibz):
"""
Convenient static constructor for a KPOINTS file in line mode,
typically used to sample high-symmetry paths of the Brillouin zone
for band-structure calculations.
Args:
divisions: Parameter determining the number of k-points along each
high-symmetry line.
ibz: HighSymmKpath object (pymatgen.symmetry.bandstructure)
Returns:
Kpoints object
"""
kpoints = list()
labels = list()
for path in ibz.kpath["path"]:
kpoints.append(ibz.kpath["kpoints"][path[0]])
labels.append(path[0])
for i in range(1, len(path) - 1):
kpoints.append(ibz.kpath["kpoints"][path[i]])
labels.append(path[i])
kpoints.append(ibz.kpath["kpoints"][path[i]])
labels.append(path[i])
kpoints.append(ibz.kpath["kpoints"][path[-1]])
labels.append(path[-1])
return Kpoints(
"Line_mode KPOINTS file",
style=Kpoints.supported_modes.Line_mode,
coord_type="Reciprocal",
kpts=kpoints,
labels=labels,
num_kpts=int(divisions),
)
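# Usage sketch (illustrative), pairing this constructor with pymatgen's
# HighSymmKpath to build a band-structure k-point path:
#
#     from pymatgen.symmetry.bandstructure import HighSymmKpath
#     kpath = HighSymmKpath(structure)
#     kpts = Kpoints.automatic_linemode(20, kpath)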
@staticmethod
def from_file(filename):
"""
Reads a Kpoints object from a KPOINTS file.
Args:
filename (str): filename to read from.
Returns:
Kpoints object
"""
with zopen(filename, "rt") as f:
return Kpoints.from_string(f.read())
@staticmethod
def from_string(string):
"""
Reads a Kpoints object from a KPOINTS string.
Args:
string (str): KPOINTS string.
Returns:
Kpoints object
"""
lines = [line.strip() for line in string.splitlines()]
comment = lines[0]
num_kpts = int(lines[1].split()[0].strip())
style = lines[2].lower()[0]
# Fully automatic KPOINTS
if style == "a":
return Kpoints.automatic(int(lines[3]))
coord_pattern = re.compile(
r"^\s*([\d+.\-Ee]+)\s+([\d+.\-Ee]+)\s+" r"([\d+.\-Ee]+)"
)
# Automatic gamma and Monk KPOINTS, with optional shift
if style == "g" or style == "m":
kpts = [int(i) for i in lines[3].split()]
kpts_shift = (0, 0, 0)
if len(lines) > 4 and coord_pattern.match(lines[4]):
try:
kpts_shift = [float(i) for i in lines[4].split()]
except ValueError:
pass
return (
Kpoints.gamma_automatic(kpts, kpts_shift)
if style == "g"
else Kpoints.monkhorst_automatic(kpts, kpts_shift)
)
# Automatic kpoints with basis
if num_kpts <= 0:
style = (
Kpoints.supported_modes.Cartesian
if style in "ck"
else Kpoints.supported_modes.Reciprocal
)
kpts = [[float(j) for j in lines[i].split()] for i in range(3, 6)]
kpts_shift = [float(i) for i in lines[6].split()]
return Kpoints(
comment=comment,
num_kpts=num_kpts,
style=style,
kpts=kpts,
kpts_shift=kpts_shift,
)
# Line-mode KPOINTS, usually used with band structures
if style == "l":
coord_type = "Cartesian" if lines[3].lower()[0] in "ck" else "Reciprocal"
style = Kpoints.supported_modes.Line_mode
kpts = []
labels = []
patt = re.compile(
r"([e0-9.\-]+)\s+([e0-9.\-]+)\s+([e0-9.\-]+)" r"\s*!*\s*(.*)"
)
for i in range(4, len(lines)):
line = lines[i]
m = patt.match(line)
if m:
kpts.append(
[float(m.group(1)), float(m.group(2)), float(m.group(3))]
)
labels.append(m.group(4).strip())
return Kpoints(
comment=comment,
num_kpts=num_kpts,
style=style,
kpts=kpts,
coord_type=coord_type,
labels=labels,
)
# Assume explicit KPOINTS if all else fails.
style = (
Kpoints.supported_modes.Cartesian
if style in "ck"
else Kpoints.supported_modes.Reciprocal
)
kpts = []
kpts_weights = []
labels = []
tet_number = 0
tet_weight = 0
tet_connections = None
for i in range(3, 3 + num_kpts):
toks = lines[i].split()
kpts.append([float(j) for j in toks[0:3]])
kpts_weights.append(float(toks[3]))
if len(toks) > 4:
labels.append(toks[4])
else:
labels.append(None)
try:
# Deal with tetrahedron method
if lines[3 + num_kpts].strip().lower()[0] == "t":
toks = lines[4 + num_kpts].split()
tet_number = int(toks[0])
tet_weight = float(toks[1])
tet_connections = []
for i in range(5 + num_kpts, 5 + num_kpts + tet_number):
toks = lines[i].split()
tet_connections.append(
(int(toks[0]), [int(toks[j]) for j in range(1, 5)])
)
except IndexError:
pass
return Kpoints(
comment=comment,
num_kpts=num_kpts,
style=Kpoints.supported_modes[str(style)],
kpts=kpts,
kpts_weights=kpts_weights,
tet_number=tet_number,
tet_weight=tet_weight,
tet_connections=tet_connections,
labels=labels,
)
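# Usage sketch (illustrative): parsing an automatic gamma-centred grid:
#
#     kp = Kpoints.from_string("Grid\n0\nGamma\n4 4 4\n")
#     assert kp.kpts == [[4, 4, 4]]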
def write_file(self, filename):
"""
Write Kpoints to a file.
Args:
filename (str): Filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
def __repr__(self):
return self.__str__()
def __str__(self):
lines = [self.comment, str(self.num_kpts), self.style.name]
style = self.style.name.lower()[0]
if style == "l":
lines.append(self.coord_type)
for i in range(len(self.kpts)):
lines.append(" ".join([str(x) for x in self.kpts[i]]))
if style == "l":
lines[-1] += " ! " + self.labels[i]
if i % 2 == 1:
lines[-1] += "\n"
elif self.num_kpts > 0:
if self.labels is not None:
lines[-1] += " %i %s" % (self.kpts_weights[i], self.labels[i])
else:
lines[-1] += " %i" % (self.kpts_weights[i])
# Print tetrahedron parameters if the number of tetrahedrons > 0
if style not in "lagm" and self.tet_number > 0:
lines.append("Tetrahedron")
lines.append("%d %f" % (self.tet_number, self.tet_weight))
for sym_weight, vertices in self.tet_connections:
lines.append(
"%d %d %d %d %d"
% (sym_weight, vertices[0], vertices[1], vertices[2], vertices[3])
)
# Print shifts for automatic kpoints types if not zero.
if self.num_kpts <= 0 and tuple(self.kpts_shift) != (0, 0, 0):
lines.append(" ".join([str(x) for x in self.kpts_shift]))
return "\n".join(lines) + "\n"
def as_dict(self):
"""
:return: MSONable dict.
"""
d = {
"comment": self.comment,
"nkpoints": self.num_kpts,
"generation_style": self.style.name,
"kpoints": self.kpts,
"usershift": self.kpts_shift,
"kpts_weights": self.kpts_weights,
"coord_type": self.coord_type,
"labels": self.labels,
"tet_number": self.tet_number,
"tet_weight": self.tet_weight,
"tet_connections": self.tet_connections,
}
optional_paras = ["genvec1", "genvec2", "genvec3", "shift"]
for para in optional_paras:
if para in self.__dict__:
d[para] = self.__dict__[para]
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation.
:return: Kpoints
"""
comment = d.get("comment", "")
generation_style = d.get("generation_style")
kpts = d.get("kpoints", [[1, 1, 1]])
kpts_shift = d.get("usershift", [0, 0, 0])
num_kpts = d.get("nkpoints", 0)
return cls(
comment=comment,
kpts=kpts,
style=generation_style,
kpts_shift=kpts_shift,
num_kpts=num_kpts,
kpts_weights=d.get("kpts_weights"),
coord_type=d.get("coord_type"),
labels=d.get("labels"),
tet_number=d.get("tet_number", 0),
tet_weight=d.get("tet_weight", 0),
tet_connections=d.get("tet_connections"),
)
def _parse_string(s):
return "{}".format(s.strip())
def _parse_bool(s):
m = re.match(r"^\.?([TFtf])[A-Za-z]*\.?", s)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(s + " should be a boolean type!")
def _parse_float(s):
return float(re.search(r"^-?\d*\.?\d*[eE]?-?\d*", s).group(0))
def _parse_int(s):
return int(re.match(r"^-?[0-9]+", s).group(0))
def _parse_list(s):
return [float(y) for y in re.split(r"\s+", s.strip()) if not y.isalpha()]
Orbital = namedtuple("Orbital", ["n", "l", "j", "E", "occ"])
OrbitalDescription = namedtuple(
"OrbitalDescription", ["l", "E", "Type", "Rcut", "Type2", "Rcut2"]
)
class BadPotcarWarning(UserWarning):
"""
Warning raised when POTCAR hashes do not pass validation
"""
pass
class PotcarSingle:
"""
Object for a **single** POTCAR. The builder assumes the POTCAR contains
the complete untouched data in "data" as a string and a dict of keywords.
.. attribute:: data
POTCAR data as a string.
.. attribute:: keywords
Keywords parsed from the POTCAR as a dict. All keywords are also
accessible as attributes in themselves. E.g., potcar.enmax,
potcar.encut, etc.
md5 hashes of the entire POTCAR file and the actual data are validated
against a database of known good hashes. Appropriate warnings or errors
are raised if a POTCAR hash fails validation.
"""
functional_dir = {
"PBE": "POT_GGA_PAW_PBE",
"PBE_52": "POT_GGA_PAW_PBE_52",
"PBE_54": "POT_GGA_PAW_PBE_54",
"LDA": "POT_LDA_PAW",
"LDA_52": "POT_LDA_PAW_52",
"LDA_54": "POT_LDA_PAW_54",
"PW91": "POT_GGA_PAW_PW91",
"LDA_US": "POT_LDA_US",
"PW91_US": "POT_GGA_US_PW91",
"Perdew-Zunger81": "POT_LDA_PAW",
}
functional_tags = {
"pe": {"name": "PBE", "class": "GGA"},
"91": {"name": "PW91", "class": "GGA"},
"rp": {"name": "revPBE", "class": "GGA"},
"am": {"name": "AM05", "class": "GGA"},
"ps": {"name": "PBEsol", "class": "GGA"},
"pw": {"name": "PW86", "class": "GGA"},
"lm": {"name": "Langreth-Mehl-Hu", "class": "GGA"},
"pb": {"name": "Perdew-Becke", "class": "GGA"},
"ca": {"name": "Perdew-Zunger81", "class": "LDA"},
"hl": {"name": "Hedin-Lundquist", "class": "LDA"},
"wi": {"name": "Wigner Interpoloation", "class": "LDA"},
}
parse_functions = {
"LULTRA": _parse_bool,
"LUNSCR": _parse_bool,
"LCOR": _parse_bool,
"LPAW": _parse_bool,
"EATOM": _parse_float,
"RPACOR": _parse_float,
"POMASS": _parse_float,
"ZVAL": _parse_float,
"RCORE": _parse_float,
"RWIGS": _parse_float,
"ENMAX": _parse_float,
"ENMIN": _parse_float,
"EMMIN": _parse_float,
"EAUG": _parse_float,
"DEXC": _parse_float,
"RMAX": _parse_float,
"RAUG": _parse_float,
"RDEP": _parse_float,
"RDEPT": _parse_float,
"QCUT": _parse_float,
"QGAM": _parse_float,
"RCLOC": _parse_float,
"IUNSCR": _parse_int,
"ICORE": _parse_int,
"NDATA": _parse_int,
"VRHFIN": _parse_string,
"LEXCH": _parse_string,
"TITEL": _parse_string,
"STEP": _parse_list,
"RRKJ": _parse_list,
"GGA": _parse_list,
}
def __init__(self, data, symbol=None):
"""
Args:
data:
Complete and single potcar file as a string.
symbol:
POTCAR symbol corresponding to the filename suffix
e.g. "Tm_3" for POTCAR.TM_3". If not given, pymatgen
will attempt to extract the symbol from the file itself.
However, this is not always reliable!
"""
self.data = data # raw POTCAR as a string
# Vasp parses header in vasprun.xml and this differs from the titel
self.header = data.split("\n")[0].strip()
search_lines = re.search(
r"(?s)(parameters from PSCTR are:" r".*?END of PSCTR-controll parameters)",
data,
).group(1)
self.keywords = {}
for key, val in re.findall(
r"(\S+)\s*=\s*(.*?)(?=;|$)", search_lines, flags=re.MULTILINE
):
try:
self.keywords[key] = self.parse_functions[key](val)
except KeyError:
warnings.warn("Ignoring unknown variable type %s" % key)
PSCTR = OrderedDict()
array_search = re.compile(r"(-*[0-9.]+)")
orbitals = []
descriptions = []
atomic_configuration = re.search(
r"Atomic configuration\s*\n?" r"(.*?)Description", search_lines
)
if atomic_configuration:
lines = atomic_configuration.group(1).splitlines()
num_entries = re.search(r"([0-9]+)", lines[0]).group(1)
num_entries = int(num_entries)
PSCTR["nentries"] = num_entries
for line in lines[1:]:
orbit = array_search.findall(line)
if orbit:
orbitals.append(
Orbital(
int(orbit[0]),
int(orbit[1]),
float(orbit[2]),
float(orbit[3]),
float(orbit[4]),
)
)
PSCTR["Orbitals"] = tuple(orbitals)
description_string = re.search(
r"(?s)Description\s*\n"
r"(.*?)Error from kinetic"
r" energy argument \(eV\)",
search_lines,
)
if description_string:
for line in description_string.group(1).splitlines():
description = array_search.findall(line)
if description:
descriptions.append(
OrbitalDescription(
int(description[0]),
float(description[1]),
int(description[2]),
float(description[3]),
int(description[4]) if len(description) > 4 else None,
float(description[5]) if len(description) > 4 else None,
)
)
if descriptions:
PSCTR["OrbitalDescriptions"] = tuple(descriptions)
rrkj_kinetic_energy_string = re.search(
r"(?s)Error from kinetic energy argument \(eV\)\s*\n"
r"(.*?)END of PSCTR-controll parameters",
search_lines,
)
rrkj_array = []
if rrkj_kinetic_energy_string:
for line in rrkj_kinetic_energy_string.group(1).splitlines():
if "=" not in line:
rrkj_array += _parse_list(line.strip("\n"))
if rrkj_array:
PSCTR["RRKJ"] = tuple(rrkj_array)
PSCTR.update(self.keywords)
self.PSCTR = OrderedDict(sorted(PSCTR.items(), key=lambda x: x[0]))
if symbol:
self._symbol = symbol
else:
try:
self._symbol = self.keywords["TITEL"].split(" ")[1].strip()
except IndexError:
self._symbol = self.keywords["TITEL"].strip()
# Compute the POTCAR hashes and check them against the database of known
# VASP POTCARs
self.hash = self.get_potcar_hash()
self.file_hash = self.get_potcar_file_hash()
if self.identify_potcar(mode='data')[0] == []:
warnings.warn("POTCAR data with symbol {} does not match any VASP\
POTCAR known to pymatgen. We advise verifying the\
integrity of your POTCAR files.".format(self.symbol),
BadPotcarWarning)
elif self.identify_potcar(mode='file')[0] == []:
warnings.warn("POTCAR with symbol {} has metadata that does not match\
any VASP POTCAR known to pymatgen. The data in this\
POTCAR is known to match the following functionals:\
{}".format(self.symbol, self.identify_potcar(mode='data')[0]),
BadPotcarWarning)
def __str__(self):
return self.data + "\n"
@property
def electron_configuration(self):
"""
:return: Electronic configuration of the PotcarSingle.
"""
if not self.nelectrons.is_integer():
warnings.warn(
"POTCAR has non-integer charge, "
"electron configuration not well-defined."
)
return None
el = Element.from_Z(self.atomic_no)
full_config = el.full_electronic_structure
nelect = self.nelectrons
config = []
while nelect > 0:
e = full_config.pop(-1)
config.append(e)
nelect -= e[-1]
return config
def write_file(self, filename: str) -> None:
"""
Writes PotcarSingle to a file.
:param filename: Filename
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename: str) -> "PotcarSingle":
"""
Reads PotcarSingle from file.
:param filename: Filename.
:return: PotcarSingle.
"""
match = re.search(r"(?<=POTCAR\.)(.*)(?=.gz)", str(filename))
if match:
symbol = match.group(0)
else:
symbol = ""
try:
with zopen(filename, "rt") as f:
return PotcarSingle(f.read(), symbol=symbol or None)
except UnicodeDecodeError:
warnings.warn(
"POTCAR contains invalid unicode errors. "
"We will attempt to read it by ignoring errors."
)
import codecs
with codecs.open(filename, "r", encoding="utf-8", errors="ignore") as f:
return PotcarSingle(f.read(), symbol=symbol or None)
@staticmethod
def from_symbol_and_functional(symbol: str, functional: str = None):
"""
Makes a PotcarSingle from a symbol and functional.
:param symbol: Symbol, e.g., Li_sv
:param functional: E.g., PBE
:return: PotcarSingle
"""
if functional is None:
functional = SETTINGS.get("PMG_DEFAULT_FUNCTIONAL", "PBE")
funcdir = PotcarSingle.functional_dir[functional]
d = SETTINGS.get("PMG_VASP_PSP_DIR")
if d is None:
raise ValueError(
"No POTCAR for %s with functional %s found. "
"Please set the PMG_VASP_PSP_DIR environment in "
".pmgrc.yaml, or you may need to set "
"PMG_DEFAULT_FUNCTIONAL to PBE_52 or PBE_54 if you "
"are using newer psps from VASP." % (symbol, functional)
)
paths_to_try = [
os.path.join(d, funcdir, "POTCAR.{}".format(symbol)),
os.path.join(d, funcdir, symbol, "POTCAR"),
]
for p in paths_to_try:
p = os.path.expanduser(p)
p = zpath(p)
if os.path.exists(p):
psingle = PotcarSingle.from_file(p)
return psingle
raise IOError(
"You do not have the right POTCAR with functional "
+ "{} and label {} in your VASP_PSP_DIR".format(functional, symbol)
)
@property
def element(self):
"""
Attempt to return the atomic symbol based on the VRHFIN keyword.
"""
element = self.keywords["VRHFIN"].split(":")[0].strip()
try:
return Element(element).symbol
except ValueError:
# VASP incorrectly gives the element symbol for Xe as "X"
# Some potentials, e.g., Zr_sv, gives the symbol as r.
if element == "X":
return "Xe"
return Element(self.symbol.split("_")[0]).symbol
@property
def atomic_no(self) -> int:
"""
Attempt to return the atomic number based on the VRHFIN keyword.
"""
return Element(self.element).Z
@property
def nelectrons(self):
"""
:return: Number of electrons
"""
return self.zval
@property
def symbol(self):
"""
:return: The POTCAR symbol, e.g. W_pv
"""
return self._symbol
@property
def potential_type(self) -> str:
"""
:return: Type of PSP. E.g., US, PAW, etc.
"""
if self.lultra:
return "US"
elif self.lpaw:
return "PAW"
else:
return "NC"
@property
def functional(self):
"""
:return: Functional associated with PotcarSingle.
"""
return self.functional_tags.get(self.LEXCH.lower(), {}).get("name")
@property
def functional_class(self):
"""
:return: Functional class associated with PotcarSingle.
"""
return self.functional_tags.get(self.LEXCH.lower(), {}).get("class")
def identify_potcar(self, mode: str = 'data'):
"""
Identify the symbol and compatible functionals associated with this PotcarSingle.
This method checks the md5 hash of either the POTCAR data (PotcarSingle.hash)
or the entire POTCAR file (PotcarSingle.file_hash) against a database
of hashes for POTCARs distributed with VASP 5.4.4.
Args:
mode (str): 'data' or 'file'. 'data' mode checks the hash of the POTCAR
data itself, while 'file' mode checks the hash of the entire
POTCAR file, including metadata.
Returns:
symbol (List): List of symbols associated with the PotcarSingle
potcar_functionals (List): List of potcar functionals associated with
the PotcarSingle
"""
# Dict to translate the sets in the .json file to the keys used in
# DictSet
mapping_dict = {'potUSPP_GGA': {"pymatgen_key": "PW91_US",
"vasp_description": "Ultrasoft pseudo potentials\
for LDA and PW91 (dated 2002-08-20 and 2002-04-08,\
respectively). These files are outdated, not\
supported and only distributed as is."},
'potUSPP_LDA': {"pymatgen_key": "LDA_US",
"vasp_description": "Ultrasoft pseudo potentials\
for LDA and PW91 (dated 2002-08-20 and 2002-04-08,\
respectively). These files are outdated, not\
supported and only distributed as is."},
'potpaw_GGA': {"pymatgen_key": "PW91",
"vasp_description": "The LDA, PW91 and PBE PAW datasets\
(snapshot: 05-05-2010, 19-09-2006 and 06-05-2010,\
respectively). These files are outdated, not\
supported and only distributed as is."},
'potpaw_LDA': {"pymatgen_key": "Perdew-Zunger81",
"vasp_description": "The LDA, PW91 and PBE PAW datasets\
(snapshot: 05-05-2010, 19-09-2006 and 06-05-2010,\
respectively). These files are outdated, not\
supported and only distributed as is."},
'potpaw_LDA.52': {"pymatgen_key": "LDA_52",
"vasp_description": "LDA PAW datasets version 52,\
including the early GW variety (snapshot 19-04-2012).\
When read by VASP these files yield identical results\
as the files distributed in 2012 ('unvie' release)."},
'potpaw_LDA.54': {"pymatgen_key": "LDA_54",
"vasp_description": "LDA PAW datasets version 54,\
including the GW variety (original release 2015-09-04).\
When read by VASP these files yield identical results as\
the files distributed before."},
'potpaw_PBE': {"pymatgen_key": "PBE",
"vasp_description": "The LDA, PW91 and PBE PAW datasets\
(snapshot: 05-05-2010, 19-09-2006 and 06-05-2010,\
respectively). These files are outdated, not\
supported and only distributed as is."},
'potpaw_PBE.52': {"pymatgen_key": "PBE_52",
"vasp_description": "PBE PAW datasets version 52,\
including early GW variety (snapshot 19-04-2012).\
When read by VASP these files yield identical\
results as the files distributed in 2012."},
'potpaw_PBE.54': {"pymatgen_key": "PBE_54",
"vasp_description": "PBE PAW datasets version 54,\
including the GW variety (original release 2015-09-04).\
When read by VASP these files yield identical results as\
the files distributed before."},
'unvie_potpaw.52': {"pymatgen_key": "unvie_LDA_52",
"vasp_description": "files released previously\
for vasp.5.2 (2012-04) and vasp.5.4 (2015-09-04)\
by univie."},
'unvie_potpaw.54': {"pymatgen_key": "unvie_LDA_54",
"vasp_description": "files released previously\
for vasp.5.2 (2012-04) and vasp.5.4 (2015-09-04)\
by univie."},
'unvie_potpaw_PBE.52': {"pymatgen_key": "unvie_PBE_52",
"vasp_description": "files released previously\
for vasp.5.2 (2012-04) and vasp.5.4 (2015-09-04)\
by univie."},
'unvie_potpaw_PBE.54': {"pymatgen_key": "unvie_PBE_54",
"vasp_description": "files released previously\
for vasp.5.2 (2012-04) and vasp.5.4 (2015-09-04)\
by univie."}
}
cwd = os.path.abspath(os.path.dirname(__file__))
if mode == 'data':
hash_db = loadfn(os.path.join(cwd, "vasp_potcar_pymatgen_hashes.json"))
potcar_hash = self.hash
elif mode == 'file':
hash_db = loadfn(os.path.join(cwd, "vasp_potcar_file_hashes.json"))
potcar_hash = self.file_hash
else:
raise ValueError("Bad 'mode' argument. Specify 'data' or 'file'.")
identity = hash_db.get(potcar_hash)
if identity:
# convert the potcar_functionals from the .json dict into the functional
# keys that pymatgen uses
potcar_functionals = []
for i in identity["potcar_functionals"]:
potcar_functionals.append(mapping_dict[i]["pymatgen_key"])
potcar_functionals = list(set(potcar_functionals))
return potcar_functionals, identity["potcar_symbols"]
else:
return [], []
def get_potcar_file_hash(self):
"""
Computes a hash of the entire PotcarSingle.
This hash corresponds to the md5 hash of the POTCAR file itself.
:return: Hash value.
"""
return md5(self.data.encode("utf-8")).hexdigest()
def get_potcar_hash(self):
"""
Computes a md5 hash of the data defining the PotcarSingle.
:return: Hash value.
"""
hash_str = ""
for k, v in self.PSCTR.items():
hash_str += "{}".format(k)
if isinstance(v, int):
hash_str += "{}".format(v)
elif isinstance(v, float):
hash_str += "{:.3f}".format(v)
elif isinstance(v, bool):
hash_str += "{}".format(bool)
elif isinstance(v, (tuple, list)):
for item in v:
if isinstance(item, float):
hash_str += "{:.3f}".format(item)
elif isinstance(item, (Orbital, OrbitalDescription)):
for item_v in item:
if isinstance(item_v, (int, str)):
hash_str += "{}".format(item_v)
elif isinstance(item_v, float):
hash_str += "{:.3f}".format(item_v)
else:
hash_str += "{}".format(item_v) if item_v else ""
else:
hash_str += v.replace(" ", "")
self.hash_str = hash_str
return md5(hash_str.lower().encode("utf-8")).hexdigest()
def __getattr__(self, a):
"""
Delegates attributes to keywords. For example, you can use
potcarsingle.enmax to get the ENMAX of the POTCAR.
        Float-type properties are converted to the correct float. By default,
        all energies are in eV and all length scales are in Angstroms.
"""
try:
return self.keywords[a.upper()]
except Exception:
raise AttributeError(a)
class Potcar(list, MSONable):
"""
Object for reading and writing POTCAR files for calculations. Consists of a
list of PotcarSingle.
"""
FUNCTIONAL_CHOICES = list(PotcarSingle.functional_dir.keys())
def __init__(self, symbols=None, functional=None, sym_potcar_map=None):
"""
Args:
symbols ([str]): Element symbols for POTCAR. This should correspond
to the symbols used by VASP. E.g., "Mg", "Fe_pv", etc.
functional (str): Functional used. To know what functional options
there are, use Potcar.FUNCTIONAL_CHOICES. Note that VASP has
different versions of the same functional. By default, the old
PBE functional is used. If you want the newer ones, use PBE_52 or
PBE_54. Note that if you intend to compare your results with the
Materials Project, you should use the default setting. You can also
override the default by setting PMG_DEFAULT_FUNCTIONAL in your
.pmgrc.yaml.
sym_potcar_map (dict): Allows a user to specify a specific element
symbol to raw POTCAR mapping.
"""
if functional is None:
functional = SETTINGS.get("PMG_DEFAULT_FUNCTIONAL", "PBE")
super().__init__()
self.functional = functional
if symbols is not None:
self.set_symbols(symbols, functional, sym_potcar_map)
def as_dict(self):
"""
:return: MSONable dict representation
"""
return {
"functional": self.functional,
"symbols": self.symbols,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: Potcar
"""
return Potcar(symbols=d["symbols"], functional=d["functional"])
@staticmethod
def from_file(filename: str):
"""
Reads Potcar from file.
:param filename: Filename
:return: Potcar
"""
try:
with zopen(filename, "rt") as f:
fdata = f.read()
except UnicodeDecodeError:
warnings.warn(
"POTCAR contains invalid unicode errors. "
"We will attempt to read it by ignoring errors."
)
import codecs
with codecs.open(filename, "r", encoding="utf-8", errors="ignore") as f:
fdata = f.read()
potcar = Potcar()
potcar_strings = re.compile(r"\n?(\s*.*?End of Dataset)", re.S).findall(fdata)
functionals = []
for p in potcar_strings:
single = PotcarSingle(p)
potcar.append(single)
functionals.append(single.functional)
if len(set(functionals)) != 1:
raise ValueError("File contains incompatible functionals!")
else:
potcar.functional = functionals[0]
return potcar
def __str__(self):
return "\n".join([str(potcar).strip("\n") for potcar in self]) + "\n"
def write_file(self, filename):
"""
Write Potcar to a file.
Args:
filename (str): filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@property
def symbols(self):
"""
Get the atomic symbols of all the atoms in the POTCAR file.
"""
return [p.symbol for p in self]
@symbols.setter
def symbols(self, symbols):
self.set_symbols(symbols, functional=self.functional)
@property
def spec(self):
"""
Get the atomic symbols and hash of all the atoms in the POTCAR file.
"""
return [{"symbol": p.symbol, "hash": p.get_potcar_hash()} for p in self]
def set_symbols(self, symbols, functional=None, sym_potcar_map=None):
"""
Initialize the POTCAR from a set of symbols. Currently, the POTCARs can
be fetched from a location specified in .pmgrc.yaml. Use pmg config
to add this setting.
Args:
symbols ([str]): A list of element symbols
functional (str): The functional to use. If None, the setting
PMG_DEFAULT_FUNCTIONAL in .pmgrc.yaml is used, or if this is
not set, it will default to PBE.
sym_potcar_map (dict): A map of symbol:raw POTCAR string. If
sym_potcar_map is specified, POTCARs will be generated from
the given map data rather than the config file location.
"""
del self[:]
if sym_potcar_map:
for el in symbols:
self.append(PotcarSingle(sym_potcar_map[el]))
else:
for el in symbols:
p = PotcarSingle.from_symbol_and_functional(el, functional)
self.append(p)
class VaspInput(dict, MSONable):
"""
Class to contain a set of vasp input objects corresponding to a run.
"""
def __init__(self, incar, kpoints, poscar, potcar, optional_files=None, **kwargs):
"""
Args:
incar: Incar object.
kpoints: Kpoints object.
poscar: Poscar object.
potcar: Potcar object.
optional_files: Other input files supplied as a dict of {
filename: object}. The object should follow standard pymatgen
conventions in implementing a as_dict() and from_dict method.
"""
super().__init__(**kwargs)
self.update(
{"INCAR": incar, "KPOINTS": kpoints, "POSCAR": poscar, "POTCAR": potcar}
)
if optional_files is not None:
self.update(optional_files)
def __str__(self):
output = []
for k, v in self.items():
output.append(k)
output.append(str(v))
output.append("")
return "\n".join(output)
def as_dict(self):
"""
:return: MSONable dict.
"""
d = {k: v.as_dict() for k, v in self.items()}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation.
:return: VaspInput
"""
dec = MontyDecoder()
sub_d = {"optional_files": {}}
for k, v in d.items():
if k in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
sub_d[k.lower()] = dec.process_decoded(v)
elif k not in ["@module", "@class"]:
sub_d["optional_files"][k] = dec.process_decoded(v)
return cls(**sub_d)
def write_input(self, output_dir=".", make_dir_if_not_present=True):
"""
Write VASP input to a directory.
Args:
output_dir (str): Directory to write to. Defaults to current
directory (".").
make_dir_if_not_present (bool): Create the directory if not
present. Defaults to True.
"""
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
for k, v in self.items():
if v is not None:
with zopen(os.path.join(output_dir, k), "wt") as f:
f.write(v.__str__())
@staticmethod
def from_directory(input_dir, optional_files=None):
"""
Read in a set of VASP input from a directory. Note that only the
standard INCAR, POSCAR, POTCAR and KPOINTS files are read unless
        optional_files is specified.
Args:
input_dir (str): Directory to read VASP input from.
optional_files (dict): Optional files to read in as well as a
dict of {filename: Object type}. Object type must have a
static method from_file.
"""
sub_d = {}
for fname, ftype in [
("INCAR", Incar),
("KPOINTS", Kpoints),
("POSCAR", Poscar),
("POTCAR", Potcar),
]:
try:
fullzpath = zpath(os.path.join(input_dir, fname))
sub_d[fname.lower()] = ftype.from_file(fullzpath)
            except FileNotFoundError:  # handle the case where there is no KPOINTS file
                sub_d[fname.lower()] = None
sub_d["optional_files"] = {}
if optional_files is not None:
for fname, ftype in optional_files.items():
sub_d["optional_files"][fname] = ftype.from_file(
os.path.join(input_dir, fname)
)
return VaspInput(**sub_d)
def run_vasp(
self,
run_dir: PathLike = ".",
vasp_cmd: list = None,
output_file: PathLike = "vasp.out",
err_file: PathLike = "vasp.err",
):
"""
Write input files and run VASP.
:param run_dir: Where to write input files and do the run.
:param vasp_cmd: Args to be supplied to run VASP. Otherwise, the
PMG_VASP_EXE in .pmgrc.yaml is used.
:param output_file: File to write output.
:param err_file: File to write err.
"""
self.write_input(output_dir=run_dir)
        vasp_cmd = vasp_cmd or SETTINGS.get("PMG_VASP_EXE")
        if not vasp_cmd:
            raise RuntimeError(
                "You need to supply vasp_cmd or set the PMG_VASP_EXE in .pmgrc.yaml to run VASP."
            )
        vasp_cmd = [os.path.expanduser(os.path.expandvars(t)) for t in vasp_cmd]
with cd(run_dir):
with open(output_file, "w") as f_std, open(
err_file, "w", buffering=1
) as f_err:
subprocess.check_call(vasp_cmd, stdout=f_std, stderr=f_err)
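

# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal example of reading an existing POTCAR with the classes above and
# printing per-element hashes. The path "POTCAR" is an assumption: it must
# point to a real, licensed VASP pseudopotential file on your machine.
if __name__ == "__main__":
    potcar = Potcar.from_file("POTCAR")
    print(potcar.symbols)  # e.g. ['Fe_pv', 'O']
    for single in potcar:
        # Per-element data hash, as consumed by the hash databases above.
        print(single.symbol, single.get_potcar_hash())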
|
gVallverdu/pymatgen
|
pymatgen/io/vasp/inputs.py
|
Python
|
mit
| 89,456
|
[
"VASP",
"pymatgen"
] |
c5318a9bdf4c2957619a8c49e393ca95cb73db63375f9c4e7e53aedcdd8da40f
|
# coding=utf-8
# Copyright 2019 The RecSim Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for Bandit Algorithms for Generalized Linear Models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
from scipy import special
import six
@six.add_metaclass(abc.ABCMeta)
class GLMAlgorithm(object):
"""Base class for Generalized Linear Models (GLM) bandit algorithms.
In this setting each arm is represented by a feature vector x and there exists
an unknown weight vector w*. In each round the algorithm pulls an arm and gets
a noisy reward, which is assumed to be the result of composing the dot product
x.w* with a link function, plus random noise. For example,
sigmoid(x.w*) + eps, where eps is sub-Gaussian noise.
Attributes:
arms: the arms pulled so far
rewards: the rewards observed so far
dim: the dimension of the feature/weight vectors
outer: the sum of outer products of the arms pulled so far (x.x^T)
sigma0: a parameter scaling the identity matrix used for making the
resulting Gram matrix positive definite.
optimism_scaling: A float specifying the confidence level. Default value
(1.0) corresponds to the exploration strategy presented in the literature.
A smaller number means less exploration and more exploitation.
_rng: An instance of random.RandomState for random number generation
"""
def __init__(self, dim, sigma0=1., optimism_scaling=1.):
    self._rewards = np.array([])  # stores all observed rewards
    self._arms = np.empty([0, dim])  # stores the pulled arm feature vectors
self._dim = dim
self._outer = np.zeros([dim, dim])
self._sigma0 = sigma0
self._optimism_scaling = optimism_scaling
def update(self, reward, arm):
"""Updates state with arm and reward.
Args:
reward: the reward received
arm: the arm that was pulled
"""
assert len(arm) == self._dim, 'Expected dimension {}, got {}'.format(
self._dim, len(arm))
self._rewards = np.append(self._rewards, reward)
self._arms = np.concatenate((self._arms, [arm]), axis=0)
self._outer += np.outer(arm, arm)
def solve_logistic_bandit(self, init_iters=10, num_iters=20, tol=1e-3):
"""Solves the maximum-likelihood problem.
Implements iterative reweighted least squares for Bayesian logistic
regression. See sections 4.3.3 and 4.5.1 in Pattern Recognition and Machine
Learning, Bishop (2006)
Args:
init_iters: number of initial iterations to skip (returns zeros)
num_iters: number of least squares iterations
tol: tolerance level of change in solution between iterations before
terminating
Returns:
w: maximum likelihood solution
gram: Gram matrix
"""
arms = self._arms
w = np.zeros(self._dim)
gram = np.eye(self._dim) / np.square(self._sigma0)
if len(self._arms) > init_iters:
for _ in range(num_iters):
prev_w = np.copy(w)
arms_w = arms.dot(w)
sig_arms_w = special.expit(arms_w)
r = np.diag(sig_arms_w * (1 - sig_arms_w))
gram = (((arms.T).dot(r)).dot(arms) +
np.eye(self._dim) / np.square(self._sigma0))
rz = r.dot(arms_w) - (sig_arms_w - self._rewards)
w = np.linalg.solve(gram, (arms.T).dot(rz))
if np.linalg.norm(w - prev_w) < tol:
break
return w, gram
def get_arm_matrix(self, arms):
"""Puts all arms into a matrix."""
return np.stack(arms, axis=0)
@abc.abstractmethod
def get_arm(self, arms):
"""Computes which arm to pull next.
Args:
arms: a list of feature vectors, one for each arm
Returns:
arm: the chosen arm
arm_ind: index of the chosen arm
scores: an array with arm scores
"""
class UCB_GLM(GLMAlgorithm): # pylint: disable=invalid-name
"""UCB-GLM algorithm.
See "Provably Optimal Algorithms for Generalized Linear Contextual Bandits",
by Li et al. (2017).
"""
def __init__(self, dim, horizon, sigma0=1., optimism_scaling=1.):
super(UCB_GLM, self).__init__(dim, sigma0, optimism_scaling)
    # Set the confidence interval scaling following Theorem 2 in Li et al.
    # (2017), "Provably Optimal Algorithms for Generalized Linear Contextual
    # Bandits".
crs = optimism_scaling # confidence region scaling
delta = 1. / float(horizon)
sigma = 0.5
kappa = 0.25
# Confidence ellipsoid width (cew):
cew = (sigma / kappa) * (np.sqrt((self._dim / 2) *
np.log(1. + 2. * horizon / self._dim) +
np.log(1 / delta)))
self._ci_scaling = crs * cew
def get_arm(self, arms):
"""Computes which arm to pull next.
Args:
arms: a list of feature vectors, one for each arm
Returns:
The selected arm, its index in arms, and the computed scores
"""
arm_matrix = self.get_arm_matrix(arms)
gram = self._outer + np.eye(self._dim) / np.square(self._sigma0)
gram_inv = np.linalg.inv(gram)
ucbs = np.sqrt((np.matmul(arm_matrix, gram_inv) * arm_matrix).sum(axis=1))
# Estimate w
w, _ = self.solve_logistic_bandit()
# Compute UCB
mu = np.matmul(arm_matrix, w) + self._ci_scaling * ucbs
arm = np.random.choice(np.flatnonzero(mu == mu.max()))
return arms[arm], arm, mu
@staticmethod
def print():
return 'GLM-UCB'
class GLM_TS(GLMAlgorithm): # pylint: disable=invalid-name
"""Thompson sampling algorithm for generalized linear models.
See "Linear Thompson Sampling Revisited" by Abeille and Lazaric (2017).
"""
def get_arm(self, arms):
"""Computes which arm to pull next.
Args:
arms: a list of feature vectors, one for each arm
Returns:
The selected arm, its index in arms, and the computed scores
"""
arm_matrix = self.get_arm_matrix(arms)
w, gram = self.solve_logistic_bandit()
gram_inv = np.square(self._optimism_scaling) * np.linalg.inv(gram)
# Posterior sampling
w_tilde = np.random.multivariate_normal(w, gram_inv)
mu = np.matmul(arm_matrix, w_tilde)
# Argmax breaking ties randomly
arm = np.random.choice(np.flatnonzero(mu == mu.max()))
return arms[arm], arm, mu
@staticmethod
def print():
return 'GLM-TS'
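

# --- Usage sketch (added for illustration; not part of the original module) ---
# Simulates a tiny logistic bandit to show the get_arm/update loop. The true
# weight vector, horizon and arm counts below are arbitrary assumptions.
if __name__ == '__main__':
  rng = np.random.RandomState(0)
  dim, horizon, n_arms = 4, 200, 10
  w_star = rng.randn(dim)  # unknown "ground truth" weight vector
  algo = UCB_GLM(dim, horizon)
  for _ in range(horizon):
    arms = [rng.randn(dim) for _ in range(n_arms)]
    arm, _, _ = algo.get_arm(arms)
    # Bernoulli reward with mean sigmoid(arm . w_star).
    reward = float(rng.rand() < special.expit(np.dot(arm, w_star)))
    algo.update(reward, arm)
  w_hat, _ = algo.solve_logistic_bandit()
  print('estimated weights:', w_hat)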
|
google-research/recsim
|
recsim/agents/bandits/glm_algorithms.py
|
Python
|
apache-2.0
| 6,821
|
[
"Gaussian"
] |
4fa7468a0773cd153360e035f5779f73d2ee7e363f64f9a1cce5120a9fbf1978
|
"""
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting, and Gaussian process classification.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
kernel = 1.0 * RBF([1.0, 1.0]) # for GPC
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'),
'GPC': GaussianProcessClassifier(kernel)
}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities:
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='w', edgecolor='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
|
vortex-ape/scikit-learn
|
examples/classification/plot_classification_probability.py
|
Python
|
bsd-3-clause
| 2,886
|
[
"Gaussian"
] |
abb4ab8e2658823a565a636aa69ab77fb7eb6d4996b60a79b54b3d84f0061537
|
#! /usr/bin/env python2
from RecBlastUtils import *
import urllib2
import pickle
from time import strftime
import subprocess
# Description:
# This script performs part 1 of the RecBlast program: starting from a list of genes and taxa,
# it saves the sequences and runs BLAST on them.
# this dictionary holds information about the genes we set out to check. structure:
# {protein_inner_id (index: 0,1,2..) = [common_id, full_id, uniprot_id]}
id_dic = {} # initialize
local_id_dic = {}
# this functions returns a FASTA sequence according to uniprot id.
def get_uni(uni_id, contact):
"""
Receives a sequence ID and an email address (optional) and returns the FASTA sequence.
Connecting to UNIPROT
:param contact: is the contact details (email address) for using the app. By default it will be the recblast email.
:param uni_id: uniprot id
:return:
"""
url = "http://www.uniprot.org/uniprot/" # uniprot to fasta
url_uniprot = url + uni_id + ".fasta" # the UNIPROT url
request = urllib2.Request(url_uniprot)
    request.add_header('User-Agent', 'Python %s' % contact)
response = urllib2.urlopen(request) # get request
    page = response.read(200000)  # read up to 200000 bytes
page = replace(page, "\n", "|")
return page
def main(file_path, contact, run_folder, fasta_path, first_blast_folder, fasta_output_folder, blastp_path, db,
taxa_list_file, outfmt, max_target_seqs, e_value_thresh, coverage_threshold, cpu, run_all, DEBUG, debug,
run_first_blast):
"""
Main function of part_one. Performs most
:param run_first_blast: if True, runs the first blast process (default)
:param file_path: The gene list file
:param contact: email address (RecBlast)
:param run_folder: path to run in
:param fasta_path: path of fasta files
:param first_blast_folder: path of first blast files
:param fasta_output_folder: path to the output fasta files
:param blastp_path: path to blastp
:param db: DB location
:param taxa_list_file: path to the taxa list file
:param outfmt: blast out format. constant.
:param max_target_seqs: Blast parameter
:param e_value_thresh: Blast parameter
:param coverage_threshold: Blast parameter
:param cpu: number of CPU to use in BLAST
:param DEBUG: DEBUG parameter
:param debug: debug function
:param run_all: run all sequences as one file (boolean, default False)
:return:
"""
# defined in advance for efficiency:
regex = re.compile(r'>.*=\d?\|')
# gene_line_regex = re.compile(r'([A-Za-z0-9]+),(.+),([A-Za-z0-9]+)$') # Probably an earlier version
gene_line_regex = re.compile(r'([A-Za-z0-9_]+),(.+),([A-Za-z0-9_]+)')
# initialize a list for the blast output file paths
blast_one_output_files = []
debug("Starting to work on the input csv file:")
with open(file_path) as f: # open .csv file
# initialize index:
csv_line_index = 0 # csv line
gene_id_index = 1 # matching genes
for line in f:
if csv_line_index > 0 and line != '\n': # skipping the header and empty lines
try:
# generates a FASTA sequence from each protein in the input CSV
gene_line_res = re_search(gene_line_regex, strip(line)) # using regex to search
common_id = gene_line_res.group(1) # gene name
full_id = gene_line_res.group(2) # description
uniprot_id = strip(replace(gene_line_res.group(3), "\n", ""))
# getting the FASTA sequence from uniprot using uniprot_id
fa = get_uni(uniprot_id, contact)
# cleaning the FASTA sequence
result = re_match(regex, fa)
fa = re_sub(regex, '', fa)
fa = replace(fa, "\n", "")
fa = replace(fa, "|", "")
grouped_res = result.group()
local_seq_id = split(grouped_res, ' ')[0][1:] # this is the seq_id used by blast
debug("local_seq_id is {}".format(local_seq_id))
sequence = fa # before we add the header to the fasta, we want to keep the sequence itself
fa = "\n".join([grouped_res, fa]) # header and sequence
# building a dictionary of the proteins we are going to check:
# {protein_inner_id: [fasta, common_id, full_id, uniprot_id]}
id_dic[gene_id_index] = [fa, common_id, full_id, uniprot_id, sequence, local_seq_id] # added
# save in a dictionary
gene_id_index += 1 # moving to the next gene on the list
except Exception, e: # in case uniprot doesn't work, please notify!
print "There is a problem with retrieving the sequences from UniProt, in line:\n{0}" \
"Please try again later.\n{1}".format(line, e)
csv_line_index += 1 # next row
# creating a file with the information about the genes we are checking in this run.
# this pickle will be used for reference later
pickle.dump(id_dic, open(join_folder(run_folder, 'genes_for_inspection_full.p'), 'wb'))
debug("Success in updating genes_for_inspection file")
# This part is for running the sequences together and not individually
if run_all:
all_fasta_filename = join_folder(fasta_path, "all_fasta.fasta")
all_blast_output_file = join_folder(first_blast_folder, "all_results.txt")
filtered_all_blast_out_filename = join_folder(first_blast_folder, "all_results.taxa_filtered.txt")
# checking if the file already exists from a previous run:
if exists_not_empty(all_fasta_filename):
debug("Fasta file {} already exists, deleting and starting a new one.".format(all_fasta_filename))
os.remove(all_fasta_filename)
# generating FASTA files and performing the blast:
for gene_id_index, valueList in id_dic.iteritems():
# defining file paths:
debug("gene_index_id: {}".format(gene_id_index))
job_name = "fasta-%s-%s" % (gene_id_index, valueList[1]) # job_name: index-common_id
debug("job_name: {}".format(job_name))
fasta_filename = join_folder(fasta_path, "{}.fasta".format(job_name))
blast_out_filename = "{}_full.txt".format(job_name) # BLAST update_match_results file
# the update_match_results file after filtering taxa:
filtered_blast_out_filename = "{}.taxa_filtered.txt".format(job_name)
# debug(valueList[5]) # That was a debug print.
local_id_dic[valueList[5]] = (fasta_filename, filtered_blast_out_filename)
with open(fasta_filename, 'w') as output:
output.write("{}\n\n".format(valueList[0])) # write fasta to output file
        # Also append the sequence to the unified FASTA file:
if run_all:
with open(all_fasta_filename, 'a') as output:
output.write("{}\n\n".format(valueList[0])) # write fasta to output file
blast_output_file = join_folder(first_blast_folder, blast_out_filename)
filtered_blast_out_filename = join_folder(first_blast_folder, filtered_blast_out_filename)
# copy the fasta file to the fasta_output folder
fasta_output_name = replace(fasta_filename, '.fasta', '')
fasta_output_filename_rbh = join_folder(fasta_output_folder,
os.path.basename(fasta_output_name) + '_RBH.fasta')
fasta_output_filename_ns = join_folder(fasta_output_folder,
os.path.basename(fasta_output_name) + '_non-strict.fasta')
fasta_output_filename_strict = join_folder(fasta_output_folder,
os.path.basename(fasta_output_name) + '_strict.fasta')
# 3 fasta output files:
shutil.copy(fasta_filename, fasta_output_filename_rbh) # copy
shutil.copy(fasta_filename, fasta_output_filename_ns) # copy
shutil.copy(fasta_filename, fasta_output_filename_strict) # copy
if not run_all:
# command line to run:
command_line = "{0} -query {1} -db {2} -outfmt '{3}' -max_target_seqs {4} -evalue {5} -max_hsps 1 " \
"-qcov_hsp_perc {6} -num_threads {7} -out {8}\n" \
"grep -v ';' {8} | grep -w -f {9} > {10}\nrm {8}\n".format(blastp_path, fasta_filename, db,
outfmt, max_target_seqs,
e_value_thresh,
coverage_threshold, cpu,
blast_output_file, taxa_list_file,
filtered_blast_out_filename)
if run_first_blast:
debug("Running the following line:\n{}".format(command_line))
# writing the command to file and running the file
try: # this try paragraph was added later to handle
script_path = write_blast_run_script(command_line, run_folder) # I/O operations,
subprocess.check_call(script_path) # delay in read/write operations
except subprocess.CalledProcessError: # restarting the process
debug("Had a little problem with running this command: " # (with a short sleep period)
"{}\nSo we are running it again.".format(command_line))
sleep(10)
script_path = write_blast_run_script(command_line, run_folder)
sleep(20)
subprocess.check_call(script_path)
print "Finished running {0}.".format(job_name)
else:
debug("Not running blast.\nSkipped running the following line:\n{}".format(command_line))
# adding the filtered file name here:
blast_one_output_files.append(filtered_blast_out_filename) # adding even if we didn't run blast
    # Running BLAST on the unified file containing all sequences:
if run_all:
# command line to run:
command_line = "{0} -query {1} -db {2} -outfmt '{3}' -max_target_seqs {4} -evalue {5} -max_hsps 1 " \
"-qcov_hsp_perc {6} -num_threads {7} -out {8}\n" \
"grep -v ';' {8} | grep -w -f {9} > {10}\nrm {8}\n".format(blastp_path, all_fasta_filename, db,
outfmt, max_target_seqs,
e_value_thresh, coverage_threshold,
cpu, all_blast_output_file,
taxa_list_file,
filtered_all_blast_out_filename)
debug("Running the following line:\n{}".format(command_line))
# writing the command to file and running the file
try: # this try paragraph was added later to handle
script_path = write_blast_run_script(command_line, run_folder) # I/O operations, delay in read/write
subprocess.check_call(script_path) # operations, etc.
except subprocess.CalledProcessError: # restarting the process (with a little sleep period)
debug("Had a little problem with running this command: "
"{}\nSo we are running it again.".format(command_line))
sleep(10)
script_path = write_blast_run_script(command_line, run_folder)
sleep(20)
subprocess.check_call(script_path)
print "Finished running all sequences."
print "Prepared and ran first BLAST on all FASTA files."
# dumping id_dic file for pickle:
pickle.dump(id_dic, open(join_folder(run_folder, "id_dic.p"), 'wb'))
print "Part 1 done at {}".format(strftime('%H:%M:%S'))
return id_dic, blast_one_output_files, local_id_dic
# DONE with part 1
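

# --- Usage sketch (added for illustration; not part of the original script) ---
# Fetches a single FASTA record from UniProt with get_uni(). The accession
# P69905 (human hemoglobin alpha subunit) and the contact address are
# illustrative assumptions; network access to uniprot.org is required.
if __name__ == '__main__':
    fasta = get_uni('P69905', 'user@example.com')
    print fasta.replace('|', '\n')  # get_uni() folds newlines into '|'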
|
neuhofmo/RecBlast
|
part_one.py
|
Python
|
mit
| 12,789
|
[
"BLAST"
] |
b7cff52b283814e315414b0a7f61b77cbe0485e241deaa9b93a80d23150ec482
|
"""
Provides a base class that implements operations on a taxonomic tree that
require recursively visiting each taxon within the tree.
"""
class TaxonVisitor:
"""
Base class for all taxon tree visitor classes. Client code calls the visit() method
with an instance of Taxon as an argument. The visitor class will then traverse the
taxon tree and operate on each Taxon object in the tree. The traversal can be limited
by either the total number of taxa processed or tree depth (or both). In essence, this
base class encapsulates an algorithm for traversing a taxa tree and allows operations
on tree objects to be implemented independently of the Taxon implementation.
"""
def __init__(self, numtaxa=-1, maxdepth=-1):
"""
If numtaxa > 0, only the first numtaxa taxa will be visited when a taxon tree
is traversed. If maxdepth > -1, the tree will only be traversed to a depth
of maxdepth.
"""
self.numtaxa = numtaxa
self.maxdepth = maxdepth
def visit(self, taxon):
"""
Initiates the taxon tree traversal.
"""
self.taxacnt = 0
self._traverseTree(taxon, 0)
def _traverseTree(self, taxon, depth):
"""
Internal method for traversing a taxon tree that tracks the recursion depth.
"""
self.processTaxon(taxon, depth)
self.taxacnt += 1
if (self.maxdepth < 0 or depth < self.maxdepth) and self.doRecursion():
for child in taxon.children:
if self.numtaxa > 0 and self.taxacnt >= self.numtaxa:
break
self._traverseTree(child, depth + 1)
self.postTaxonProcessing(taxon, depth)
def doRecursion(self):
"""
        This method can be overridden by child classes and used to implement additional
criteria for deciding whether to recursively descend into the next level of a
taxa tree. The method is called prior to processing the child taxa of a taxon;
if it returns True, the recursion is continued, otherwise, the children are
not visited.
"""
return True
def processTaxon(self, taxon, depth):
"""
This method is called for each Taxon object in the tree. The argument 'depth'
provides the current depth in the tree, with the root at 0. This method should
be overridden by child classes to actually do something with each Taxon object.
"""
pass
def postTaxonProcessing(self, taxon, depth):
"""
This method is called after tree traversal has returned from recursively
        traversing taxon's descendants. It can be overridden by child classes to
implement "clean up" code that should be run before leaving a taxon.
"""
pass
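

# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal visitor that prints an indented taxa tree. _StubTaxon is an
# illustrative stand-in for the real Taxon class; the traversal above only
# relies on the `children` attribute.
class _PrintNamesVisitor(TaxonVisitor):
    def processTaxon(self, taxon, depth):
        print('  ' * depth + taxon.name)


if __name__ == '__main__':
    class _StubTaxon:
        def __init__(self, name, children=()):
            self.name = name
            self.children = list(children)

    root = _StubTaxon('Animalia', [
        _StubTaxon('Chordata', [_StubTaxon('Mammalia')]),
        _StubTaxon('Arthropoda')
    ])
    _PrintNamesVisitor().visit(root)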
|
stuckyb/sqlite_taxonomy
|
utilities/taxolib/taxonvisitor.py
|
Python
|
gpl-3.0
| 2,844
|
[
"VisIt"
] |
4d04b5d81daf56a94e1d053306267c6d6d78a740dd378f4dcfe86bab21b91a97
|
# -*- coding: utf-8 -*-
import random
import time
import scrapy
from scrapyprj.items import HouseNewsItem
from scrapyprj.utils import safe_extract, extract_article, extract_url
class SznewsSpider(scrapy.Spider):
handle_httpstatus_list = [400, 404, 407, 502]
name = "sznews"
allowed_domains = ["dc.sznews.com"]
start_urls = (
'http://dc.sznews.com/node_204507.htm',
)
def __init__(self, name=None, **kwargs):
super(SznewsSpider, self).__init__(name, **kwargs)
def parse(self, response):
article_titles = response.xpath(
'//div[@class="fl w660-news-index"]/div[@class="list-con"]/h3/a')
for i, article_title in enumerate(article_titles):
self.logger.info('article: %s' %
safe_extract(article_title.xpath('./text()')))
yield scrapy.Request(
extract_url(response, article_title.xpath('./@href')),
callback=self.parse_detail,
headers={},
meta={
'item': {
'uid': 'detail',
},
'dont_merge_cookies': True,
'cookiejar': 2 * i + 1,
})
next_page = response.xpath(
'//div[@class="fl w660-news-index"]/div/center/span/following-sibling::a[1]/@href')
yield scrapy.Request(
extract_url(response, next_page),
callback=self.parse,
headers={},
meta={
'item': {
'uid': 'list',
},
'dont_merge_cookies': True,
'cookiejar': random.randint(1, 10000),
})
pass
def parse_detail(self, response):
        # TODO: maybe consider goose or newspaper to deal with article
        # extraction
# with open('/tmp/a.html', 'w') as f:
# f.write(response.body)
article = extract_article(raw_html=response.body)
if response.xpath('//*[@id="source_baidu"]/a'):
source = response.xpath('//*[@id="source_baidu"]/a')
source_name = safe_extract(source.xpath('./text()'))
source_url = safe_extract(source.xpath('./@href'))
else:
source = response.xpath('//*[@id="source_baidu"]')
try:
source_name = str(
safe_extract(
source.xpath('./text()')).split(':')[1]).strip()
            except Exception:
source_name = None
source_url = None
item = response.meta.get('item').get('item')
if item:
item['content'] = item.get('content') + article['cleaned_text']
item['keywords'] = article['meta']['keywords']
else:
item = HouseNewsItem({
# 'html_document': [safe_extract(response.xpath('//*[@id="PrintTxt"]'))],
'url': [response.url],
'title': safe_extract(response.xpath('//*[@id="PrintTxt"]/h2/text()')),
'crawl_time': time.time(),
'release_time': safe_extract(response.xpath('//*[@id="pubtime_baidu"]/text()')),
'summary': safe_extract(response.xpath('//p[@id="fzy"]/text()')),
# 'content': safe_extract(response.xpath('(//*[@id="PrintTxt"]/div[2]/p/font|//*[@id="PrintTxt"]/div[2]/p)/text()')),
'content': article['cleaned_text'],
'source_name': source_name,
'source_url': source_url,
'keywords': article['meta']['keywords'],
})
        # TODO: an article may be split across two pages, so we need to visit
        # the next page
next_page = response.xpath(
'//*[@id="jyzdy_q"]/div[3]/center/span/following-sibling::a[1]/@href')
if next_page:
self.logger.info('next detail page %s' %
(extract_url(response, next_page)))
yield scrapy.Request(extract_url(response, next_page),
callback=self.parse_detail,
meta={
'item': {
'item': item,
},
}
)
else:
yield item
pass
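

# --- Usage sketch (added for illustration; not part of the original module) ---
# Runs the spider programmatically via Scrapy's CrawlerProcess instead of the
# `scrapy crawl sznews` CLI. Project settings are omitted here, which is an
# assumption; a real run would load the scrapyprj settings module.
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess()
    process.crawl(SznewsSpider)
    process.start()  # blocks until the crawl finishes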
|
Alexoner/web-crawlers
|
scrapyprj/scrapyprj/spiders/sznews.py
|
Python
|
gpl-2.0
| 4,365
|
[
"VisIt"
] |
232904d7ef02342d8622bd049d8afe0609e7d9a44908de29a2296c3f152403e3
|
"""Groove MIDI Loader
.. admonition:: Dataset Info
:class: dropdown
The Groove MIDI Dataset (GMD) is composed of 13.6 hours of aligned MIDI and
synthesized audio of human-performed, tempo-aligned expressive drumming.
The dataset contains 1,150 MIDI files and over 22,000 measures of drumming.
To enable a wide range of experiments and encourage comparisons between methods
on the same data, Gillick et al. created a new dataset of drum performances
recorded in MIDI format. They hired professional drummers and asked them to
perform in multiple styles to a click track on a Roland TD-11 electronic drum kit.
They also recorded the aligned, high-quality synthesized audio from the TD-11 and
include it in the release.
The Groove MIDI Dataset (GMD), has several attributes that distinguish it from
existing ones:
* The dataset contains about 13.6 hours, 1,150 MIDI files, and over 22,000
measures of drumming.
* Each performance was played along with a metronome set at a specific tempo
by the drummer.
* The data includes performances by a total of 10 drummers, with more than 80%
of duration coming from hired professionals. The professionals were able to
improvise in a wide range of styles, resulting in a diverse dataset.
* The drummers were instructed to play a mix of long sequences (several minutes
of continuous playing) and short beats and fills.
* Each performance is annotated with a genre (provided by the drummer), tempo,
and anonymized drummer ID.
* Most of the performances are in 4/4 time, with a few examples from other time
signatures.
* Four drummers were asked to record the same set of 10 beats in their own
style. These are included in the test set split, labeled eval-session/groove1-10.
* In addition to the MIDI recordings that are the primary source of data for the
experiments in this work, the authors captured the synthesized audio outputs of
the drum set and aligned them to within 2ms of the corresponding MIDI files.
A train/validation/test split configuration is provided for easier comparison of
model accuracy on various tasks.
The dataset is made available by Google LLC under a Creative Commons
Attribution 4.0 International (CC BY 4.0) License.
For more details, please visit: http://magenta.tensorflow.org/datasets/groove
"""
import csv
import os
from typing import BinaryIO, Optional, Tuple
from deprecated.sphinx import deprecated
import librosa
import numpy as np
import pretty_midi
from smart_open import open
from mirdata import annotations
from mirdata import core
from mirdata import download_utils
from mirdata import io
from mirdata import jams_utils
BIBTEX = """@inproceedings{groove2019,
Author = {Jon Gillick and Adam Roberts and Jesse Engel and Douglas Eck
and David Bamman},
Title = {Learning to Groove with Inverse Sequence Transformations},
Booktitle = {International Conference on Machine Learning (ICML)},
Year = {2019},
}"""
INDEXES = {
"default": "1.0.0",
"test": "1.0.0",
"1.0.0": core.Index(filename="groove_midi_index_1.0.0.json"),
}
REMOTES = {
"all": download_utils.RemoteFileMetadata(
filename="groove-v1-0.0.zip",
url="http://storage.googleapis.com/magentadata/datasets/groove/groove-v1.0.0.zip",
checksum="99db7e2a087761a913b2abfb19e86181",
unpack_directories=["groove"],
)
}
LICENSE_INFO = "Creative Commons Attribution 4.0 International (CC BY 4.0) License."
DRUM_MAPPING = {
36: {"Roland": "Kick", "General MIDI": "Bass Drum 1", "Simplified": "Bass (36)"},
38: {
"Roland": "Snare (Head)",
"General MIDI": "Acoustic Snare",
"Simplified": "Snare (38)",
},
40: {
"Roland": "Snare (Rim)",
"General MIDI": "Electric Snare",
"Simplified": "Snare (38)",
},
37: {
"Roland": "Snare X-Stick",
"General MIDI": "Side Stick",
"Simplified": "Snare (38)",
},
48: {
"Roland": "Tom 1",
"General MIDI": "Hi-Mid Tom",
"Simplified": "High Tom (50)",
},
50: {
"Roland": "Tom 1 (Rim)",
"General MIDI": "High Tom",
"Simplified": "High Tom (50)",
},
45: {
"Roland": "Tom 2",
"General MIDI": "Low Tom",
"Simplified": "Low-Mid Tom (47)",
},
47: {
"Roland": "Tom 2 (Rim)",
"General MIDI": "Low-Mid Tom",
"Simplified": "Low-Mid Tom (47)",
},
43: {
"Roland": "Tom 3 (Head)",
"General MIDI": "High Floor Tom",
"Simplified": "High Floor Tom (43)",
},
58: {
"Roland": "Tom 3 (Rim)",
"General MIDI": "Vibraslap",
"Simplified": "High Floor Tom (43)",
},
46: {
"Roland": "HH Open (Bow)",
"General MIDI": "Open Hi-Hat",
"Simplified": "Open Hi-Hat (46)",
},
26: {
"Roland": "HH Open (Edge)",
"General MIDI": "N/A",
"Simplified": "Open Hi-Hat (46)",
},
42: {
"Roland": "HH Closed (Bow)",
"General MIDI": "Closed Hi-Hat",
"Simplified": "Closed Hi-Hat (42)",
},
22: {
"Roland": "HH Closed (Edge)",
"General MIDI": "N/A",
"Simplified": "Closed Hi-Hat (42)",
},
44: {
"Roland": "HH Pedal",
"General MIDI": "Pedal Hi-Hat",
"Simplified": "Closed Hi-Hat (42)",
},
49: {
"Roland": "Crash 1 (Bow)",
"General MIDI": "Crash Cymbal 1",
"Simplified": "Crash Cymbal (49)",
},
55: {
"Roland": "Crash 1 (Edge)",
"General MIDI": "Splash Cymbal",
"Simplified": "Crash Cymbal (49)",
},
57: {
"Roland": "Crash 2 (Bow)",
"General MIDI": "Crash Cymbal 2",
"Simplified": "Crash Cymbal (49)",
},
52: {
"Roland": "Crash 2 (Edge)",
"General MIDI": "Chinese Cymbal",
"Simplified": "Crash Cymbal (49)",
},
51: {
"Roland": "Ride (Bow)",
"General MIDI": "Ride Cymbal 1",
"Simplified": "Ride Cymbal (51)",
},
59: {
"Roland": "Ride (Edge)",
"General MIDI": "Ride Cymbal 2",
"Simplified": "Ride Cymbal (51)",
},
53: {
"Roland": "Ride (Bell)",
"General MIDI": "Ride Bell",
"Simplified": "Ride Cymbal (51)",
},
}
class Track(core.Track):
"""Groove MIDI Track class
Args:
track_id (str): track id of the track
Attributes:
drummer (str): Drummer id of the track (ex. 'drummer1')
session (str): Type of session (ex. 'session1', 'eval_session')
track_id (str): track id of the track (ex. 'drummer1/eval_session/1')
style (str): Style (genre, groove type) of the track (ex. 'funk/groove1')
tempo (int): track tempo in beats per minute (ex. 138)
beat_type (str): Whether the track is a beat or a fill (ex. 'beat')
time_signature (str): Time signature of the track (ex. '4-4', '6-8')
midi_path (str): Path to the midi file
audio_path (str): Path to the audio file
duration (float): Duration of the midi file in seconds
split (str): Whether the track is for a train/valid/test set. One of
'train', 'valid' or 'test'.
Cached Properties:
beats (BeatData): Machine-generated beat annotations
drum_events (EventData): Annotated drum kit events
midi (pretty_midi.PrettyMIDI): object containing MIDI information
"""
def __init__(
self,
track_id,
data_home,
dataset_name,
index,
metadata,
):
super().__init__(
track_id,
data_home,
dataset_name,
index,
metadata,
)
self.midi_path = self.get_path("midi")
self.audio_path = self.get_path("audio")
@property
def drummer(self):
return self._track_metadata.get("drummer")
@property
def session(self):
return self._track_metadata.get("session")
@property
def style(self):
return self._track_metadata.get("style")
@property
def tempo(self):
return self._track_metadata.get("tempo")
@property
def beat_type(self):
return self._track_metadata.get("beat_type")
@property
def time_signature(self):
return self._track_metadata.get("time_signature")
@property
def duration(self):
return self._track_metadata.get("duration")
@property
def split(self):
return self._track_metadata.get("split")
@property
def midi_filename(self):
return self._track_metadata.get("midi_filename")
@property
def audio_filename(self):
return self._track_metadata.get("audio_filename")
@property
def audio(self) -> Tuple[Optional[np.ndarray], Optional[float]]:
"""The track's audio
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_audio(self.audio_path)
@core.cached_property
def beats(self):
return load_beats(self.midi_path, self.midi)
@core.cached_property
def drum_events(self):
return load_drum_events(self.midi_path, self.midi)
@core.cached_property
def midi(self):
return load_midi(self.midi_path)
def to_jams(self):
"""Get the track's data in jams format
Returns:
jams.JAMS: the track's data in jams format
"""
return jams_utils.jams_converter(
beat_data=[(self.beats, "midi beats")],
tempo_data=[(self.tempo, "midi tempo")],
event_data=[(self.drum_events, "annotated drum patterns")],
metadata=self._track_metadata,
)
def load_audio(path: str) -> Tuple[Optional[np.ndarray], Optional[float]]:
"""Load a Groove MIDI audio file.
Args:
path: path to an audio file
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
if not path:
return None, None
return librosa.load(path, sr=22050, mono=True)
@io.coerce_to_bytes_io
def load_midi(fhandle: BinaryIO) -> Optional[pretty_midi.PrettyMIDI]:
"""Load a Groove MIDI midi file.
Args:
fhandle (str or file-like): File-like object or path to midi file
Returns:
midi_data (pretty_midi.PrettyMIDI): pretty_midi object
"""
return pretty_midi.PrettyMIDI(fhandle)
def load_beats(midi_path, midi=None):
"""Load beat data from the midi file.
Args:
midi_path (str): path to midi file
midi (pretty_midi.PrettyMIDI): pre-loaded midi object or None
if None, the midi object is loaded using midi_path
Returns:
annotations.BeatData: machine generated beat data
"""
if midi is None:
midi = load_midi(midi_path)
beat_times = midi.get_beats()
beat_range = np.arange(0, len(beat_times))
meter = midi.time_signature_changes[0]
beat_positions = 1 + np.mod(beat_range, meter.numerator)
return annotations.BeatData(beat_times, "s", beat_positions, "bar_index")
def load_drum_events(midi_path, midi=None):
"""Load drum events from the midi file.
Args:
midi_path (str): path to midi file
midi (pretty_midi.PrettyMIDI): pre-loaded midi object or None
if None, the midi object is loaded using midi_path
Returns:
annotations.EventData: drum event data
"""
if midi is None:
midi = load_midi(midi_path)
start_times = []
end_times = []
events = []
for note in midi.instruments[0].notes:
start_times.append(note.start)
end_times.append(note.end)
events.append(DRUM_MAPPING[note.pitch]["Roland"])
return annotations.EventData(
np.array([start_times, end_times]).T, "s", events, "open"
)
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
"""
The groove_midi dataset
"""
def __init__(self, data_home=None, version="default"):
super().__init__(
data_home,
version,
name="groove_midi",
track_class=Track,
bibtex=BIBTEX,
indexes=INDEXES,
remotes=REMOTES,
license_info=LICENSE_INFO,
)
@deprecated(
reason="Use mirdata.datasets.groove_midi.load_audio",
version="0.3.4",
)
def load_audio(self, *args, **kwargs):
return load_audio(*args, **kwargs)
@deprecated(
reason="Use mirdata.datasets.groove_midi.load_midi",
version="0.3.4",
)
def load_midi(self, *args, **kwargs):
return load_midi(*args, **kwargs)
@deprecated(
reason="Use mirdata.datasets.groove_midi.load_beats",
version="0.3.4",
)
def load_beats(self, *args, **kwargs):
return load_beats(*args, **kwargs)
@deprecated(
reason="Use mirdata.datasets.groove_midi.load_drum_events",
version="0.3.4",
)
def load_drum_events(self, *args, **kwargs):
return load_drum_events(*args, **kwargs)
@core.cached_property
def _metadata(self):
metadata_path = os.path.join(self.data_home, "info.csv")
metadata_index = {}
try:
with open(metadata_path, "r") as fhandle:
csv_reader = csv.DictReader(fhandle, delimiter=",")
for row in csv_reader:
track_id = row["id"]
metadata_index[track_id] = {
key: row[key] for key in row.keys() if key != "id"
}
metadata_index[track_id]["tempo"] = int(
metadata_index[track_id].pop("bpm")
)
metadata_index[track_id]["duration"] = float(
metadata_index[track_id]["duration"]
)
metadata_index[track_id]["track_id"] = track_id
except FileNotFoundError:
raise FileNotFoundError("Metadata not found. Did you run .download()?")
return metadata_index
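

# --- Usage sketch (added for illustration; not part of the original module) ---
# Initializes the loader, downloads the data and inspects one track. The
# data_home path is an assumption; any writable location works, and the
# download is only performed once.
if __name__ == "__main__":
    dataset = Dataset(data_home="./mir_datasets/groove_midi")
    dataset.download()  # fetches and unpacks the archive listed in REMOTES
    track = dataset.choice_track()  # a random track from the index
    print(track.track_id, track.tempo, track.beat_type)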
|
mir-dataset-loaders/mirdata
|
mirdata/datasets/groove_midi.py
|
Python
|
bsd-3-clause
| 14,280
|
[
"VisIt"
] |
e9c8861c98e07cbd619d7584121470ab56bd83d507a1a1749875d39dbdf59949
|
#!/usr/bin/env python
"""Automatically install required tools and data to run bcbio-nextgen pipelines.
This automates the steps required for installation and setup to make it
easier to get started with bcbio-nextgen. The defaults provide data files
for human variant calling.
Requires: git, wget, bzip2, Python 3.x, Python 2.7 or argparse + Python 2.6 and earlier
"""
from __future__ import print_function
import collections
import contextlib
import datetime
import os
import platform
import shutil
import subprocess
import sys
try:
import urllib2 as urllib_request
except ImportError:
import urllib.request as urllib_request
REMOTES = {
"requirements": "https://raw.githubusercontent.com/chapmanb/bcbio-nextgen/master/requirements-conda.txt",
"gitrepo": "git://github.com/chapmanb/bcbio-nextgen.git",
"system_config": "https://raw.github.com/chapmanb/bcbio-nextgen/master/config/bcbio_system.yaml",
"anaconda": "https://repo.continuum.io/miniconda/Miniconda-latest-%s-x86_64.sh"}
def main(args, sys_argv):
check_arguments(args)
check_dependencies()
with bcbio_tmpdir():
setup_data_dir(args)
print("Installing isolated base python installation")
anaconda = install_anaconda_python(args)
print("Installing bcbio-nextgen")
bcbio = install_conda_pkgs(anaconda)
bootstrap_bcbionextgen(anaconda, args)
print("Installing data and third party dependencies")
system_config = write_system_config(REMOTES["system_config"], args.datadir,
args.tooldir)
setup_manifest(args.datadir)
subprocess.check_call([bcbio, "upgrade"] + _clean_args(sys_argv, args))
print("Finished: bcbio-nextgen, tools and data installed")
print(" Genome data installed in:\n %s" % args.datadir)
if args.tooldir:
print(" Tools installed in:\n %s" % args.tooldir)
print(" Ready to use system configuration at:\n %s" % system_config)
print(" Edit configuration file as needed to match your machine or cluster")
def _clean_args(sys_argv, args):
"""Remove data directory from arguments to pass to upgrade function.
"""
base = [x for x in sys_argv if
x.startswith("-") or not args.datadir == os.path.abspath(os.path.expanduser(x))]
if "--nodata" in base:
base.remove("--nodata")
else:
base.append("--data")
return base
def bootstrap_bcbionextgen(anaconda, args):
if args.upgrade == "development":
subprocess.check_call([anaconda["pip"], "install", "--upgrade", "--no-deps",
"git+%s#egg=bcbio-nextgen" % REMOTES["gitrepo"]])
def install_conda_pkgs(anaconda):
if not os.path.exists(os.path.basename(REMOTES["requirements"])):
subprocess.check_call(["wget", REMOTES["requirements"]])
subprocess.check_call([anaconda["conda"], "install", "--quiet", "--yes", "-c", "bioconda",
"--file", os.path.basename(REMOTES["requirements"])])
return os.path.join(anaconda["dir"], "bin", "bcbio_nextgen.py")
def _guess_distribution():
"""Simple approach to identify if we are on a MacOSX or Linux system for Anaconda.
"""
if platform.mac_ver()[0]:
return "macosx"
else:
return "linux"
def install_anaconda_python(args):
"""Provide isolated installation of Anaconda python for running bcbio-nextgen.
http://docs.continuum.io/anaconda/index.html
"""
anaconda_dir = os.path.join(args.datadir, "anaconda")
bindir = os.path.join(anaconda_dir, "bin")
conda = os.path.join(bindir, "conda")
if not os.path.exists(anaconda_dir) or not os.path.exists(conda):
if os.path.exists(anaconda_dir):
shutil.rmtree(anaconda_dir)
dist = args.distribution if args.distribution else _guess_distribution()
url = REMOTES["anaconda"] % ("MacOSX" if dist.lower() == "macosx" else "Linux")
if not os.path.exists(os.path.basename(url)):
subprocess.check_call(["wget", url])
subprocess.check_call("bash %s -b -p %s" %
(os.path.basename(url), anaconda_dir), shell=True)
return {"conda": conda,
"pip": os.path.join(bindir, "pip"),
"dir": anaconda_dir}
def setup_manifest(datadir):
"""Create barebones manifest to be filled in during update
"""
manifest_dir = os.path.join(datadir, "manifest")
if not os.path.exists(manifest_dir):
os.makedirs(manifest_dir)
def write_system_config(base_url, datadir, tooldir):
"""Write a bcbio_system.yaml configuration file with tool information.
"""
out_file = os.path.join(datadir, "galaxy", os.path.basename(base_url))
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
if os.path.exists(out_file):
# if no tool directory and exists, do not overwrite
if tooldir is None:
return out_file
else:
bak_file = out_file + ".bak%s" % (datetime.datetime.now().strftime("%Y%M%d_%H%M"))
shutil.copy(out_file, bak_file)
if tooldir:
java_basedir = os.path.join(tooldir, "share", "java")
rewrite_ignore = ("log",)
with contextlib.closing(urllib_request.urlopen(base_url)) as in_handle:
with open(out_file, "w") as out_handle:
in_resources = False
in_prog = None
for line in (l.decode("utf-8") for l in in_handle):
if line[0] != " ":
in_resources = line.startswith("resources")
in_prog = None
elif (in_resources and line[:2] == " " and line[2] != " "
and not line.strip().startswith(rewrite_ignore)):
in_prog = line.split(":")[0].strip()
# Update java directories to point to install directory, avoid special cases
elif line.strip().startswith("dir:") and in_prog and in_prog not in ["log", "tmp"]:
final_dir = os.path.basename(line.split()[-1])
if tooldir:
line = "%s: %s\n" % (line.split(":")[0],
os.path.join(java_basedir, final_dir))
in_prog = None
elif line.startswith("galaxy"):
line = "# %s" % line
out_handle.write(line)
return out_file
def setup_data_dir(args):
if not os.path.exists(args.datadir):
cmd = ["mkdir", "-p", args.datadir]
subprocess.check_call(cmd)
@contextlib.contextmanager
def bcbio_tmpdir():
orig_dir = os.getcwd()
work_dir = os.path.join(os.getcwd(), "tmpbcbio-install")
if not os.path.exists(work_dir):
os.makedirs(work_dir)
os.chdir(work_dir)
yield work_dir
os.chdir(orig_dir)
shutil.rmtree(work_dir)
def check_arguments(args):
"""Ensure argruments are consistent and correct.
"""
if args.toolplus and not args.tooldir:
raise argparse.ArgumentTypeError("Cannot specify --toolplus without --tooldir")
def check_dependencies():
"""Ensure required tools for installation are present.
"""
print("Checking required dependencies")
for dep, msg in [(["git", "--version"], "Git (http://git-scm.com/)"),
(["wget", "--version"], "wget"),
(["bzip2", "-h"], "bzip2")]:
try:
p = subprocess.Popen(dep, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
out, code = p.communicate()
except OSError:
out = "Executable not found"
code = 127
if code == 127:
raise OSError("bcbio-nextgen installer requires %s\n%s" % (msg, out))
def _check_toolplus(x):
"""Parse options for adding non-standard/commercial tools like GATK and MuTecT.
"""
import argparse
Tool = collections.namedtuple("Tool", ["name", "fname"])
std_choices = set(["data", "cadd", "dbnsfp"])
if x in std_choices:
return Tool(x, None)
elif "=" in x and len(x.split("=")) == 2:
name, fname = x.split("=")
fname = os.path.normpath(os.path.realpath(fname))
if not os.path.exists(fname):
raise argparse.ArgumentTypeError("Unexpected --toolplus argument for %s. File does not exist: %s"
% (name, fname))
return Tool(name, fname)
else:
raise argparse.ArgumentTypeError("Unexpected --toolplus argument. Expect toolname=filename.")
if __name__ == "__main__":
try:
import argparse
except ImportError:
raise ImportError("bcbio-nextgen installer requires `argparse`, included in Python 2.7.\n"
"Install for earlier versions with `pip install argparse` or "
"`easy_install argparse`.")
parser = argparse.ArgumentParser(
description="Automatic installation for bcbio-nextgen pipelines")
parser.add_argument("datadir", help="Directory to install genome data",
type=lambda x: (os.path.abspath(os.path.expanduser(x))))
parser.add_argument("--tooldir",
help="Directory to install 3rd party software tools. Leave unspecified for no tools",
type=lambda x: (os.path.abspath(os.path.expanduser(x))), default=None)
parser.add_argument("--toolplus", help="Specify additional tool categories to install",
action="append", default=[], type=_check_toolplus)
parser.add_argument("--genomes", help="Genomes to download",
action="append", default=[],
choices=["GRCh37", "hg19", "hg38", "hg38-noalt", "mm10", "mm9", "rn6", "rn5",
"canFam3", "dm3", "galGal4", "phix", "pseudomonas_aeruginosa_ucbpp_pa14",
"sacCer3", "TAIR10", "WBcel235", "xenTro3", "Zv9", "GRCz10"])
parser.add_argument("--aligners", help="Aligner indexes to download",
action="append", default=[],
choices=["bowtie", "bowtie2", "bwa", "novoalign", "rtg", "snap", "star", "ucsc", "hisat2"])
parser.add_argument("--nodata", help="Do not install data dependencies",
dest="install_data", action="store_false", default=True)
parser.add_argument("--isolate", help="Created an isolated installation without PATH updates",
dest="isolate", action="store_true", default=False)
parser.add_argument("-u", "--upgrade", help="Code version to install",
choices=["stable", "development"], default="stable")
parser.add_argument("--distribution", help="Operating system distribution",
default="",
choices=["ubuntu", "debian", "centos", "scientificlinux", "macosx"])
if len(sys.argv) == 1:
parser.print_help()
else:
main(parser.parse_args(), sys.argv[1:])
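
# --- Usage sketch (added for illustration; not part of the original script) ---
# A typical invocation, shown as a comment since this file is itself the CLI.
# The paths, genome build and aligner below are illustrative assumptions:
#
#   python bcbio_nextgen_install.py /usr/local/share/bcbio \
#       --tooldir /usr/local --genomes GRCh37 --aligners bwa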
|
gifford-lab/bcbio-nextgen
|
scripts/bcbio_nextgen_install.py
|
Python
|
mit
| 11,013
|
[
"BWA",
"Bioconda",
"Bowtie",
"Galaxy"
] |
2426231cc2b26b9576be22f40f1206cd2be0a451069599e91ccceb87b9f46afc
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAdsplit(RPackage):
"""This package implements clustering of microarray gene expression
profiles according to functional annotations. For each term genes
are annotated to, splits into two subclasses are computed and a
significance of the supporting gene set is determined."""
homepage = "https://www.bioconductor.org/packages/adSplit/"
url = "https://git.bioconductor.org/packages/adSplit"
version('1.46.0', git='https://git.bioconductor.org/packages/adSplit', commit='7e81a83f34d371447f491b3a146bf6851e260c7c')
depends_on('r@3.4.0:3.4.9', when='@1.46.0')
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-cluster', type=('build', 'run'))
depends_on('r-go-db', type=('build', 'run'))
depends_on('r-kegg-db', type=('build', 'run'))
depends_on('r-multtest', type=('build', 'run'))
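
# --- Usage sketch (added for illustration; not part of the original package) ---
# With this recipe on Spack's builtin repo path, the package is installed from
# the command line; the pinned version below is illustrative:
#
#   spack install r-adsplit@1.46.0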
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-adsplit/package.py
|
Python
|
lgpl-2.1
| 2,169
|
[
"Bioconductor"
] |
983d6cc8905452f27c703b179adbaa428ccbd15d3ad567b5f2df20ed71a13e8e
|
import datetime
from xml.dom.minidom import parseString
from django.core import mail
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from threadedcomments.models import FreeThreadedComment, ThreadedComment, TestModel
from threadedcomments.models import MARKDOWN, TEXTILE, REST, PLAINTEXT
from threadedcomments.templatetags import threadedcommentstags as tags
__all__ = ("TemplateTagTestCase",)
@override_settings(ROOT_URLCONF="threadedcomments.urls")
class TemplateTagTestCase(TestCase):
def test_get_comment_url(self):
user = User.objects.create_user('user', 'floguy@gmail.com', password='password')
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
comment = ThreadedComment.objects.create_for_object(topic,
user = user,
ip_address = '127.0.0.1',
comment = "My test comment!",
)
c = Context({
'topic': topic,
'parent': comment
})
sc = {
"ct": content_type.pk,
"id": topic.pk,
"pid": comment.pk,
}
self.assertEquals(Template('{% load threadedcommentstags %}{% get_comment_url topic %}').render(c), u'/comment/%(ct)s/%(id)s/' % sc)
self.assertEquals(Template('{% load threadedcommentstags %}{% get_comment_url topic parent %}').render(c), u'/comment/%(ct)s/%(id)s/%(pid)s/' % sc)
self.assertEquals(Template('{% load threadedcommentstags %}{% get_comment_url_json topic %}').render(c), u'/comment/%(ct)s/%(id)s/json/' % sc)
self.assertEquals(Template('{% load threadedcommentstags %}{% get_comment_url_xml topic %}').render(c), u'/comment/%(ct)s/%(id)s/xml/' % sc)
self.assertEquals(Template('{% load threadedcommentstags %}{% get_comment_url_json topic parent %}').render(c), u'/comment/%(ct)s/%(id)s/%(pid)s/json/' % sc)
self.assertEquals(Template('{% load threadedcommentstags %}{% get_comment_url_xml topic parent %}').render(c), u'/comment/%(ct)s/%(id)s/%(pid)s/xml/' % sc)
def test_get_free_comment_url(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
comment = FreeThreadedComment.objects.create_for_object(topic,
ip_address = '127.0.0.1',
comment = "My test free comment!",
)
c = Context({
'topic': topic,
'parent': comment,
})
sc = {
"ct": content_type.pk,
"id": topic.pk,
"pid": comment.pk,
}
self.assertEquals(Template('{% load threadedcommentstags %}{% get_free_comment_url topic %}').render(c), u'/freecomment/%(ct)s/%(id)s/' % sc)
self.assertEquals(Template('{% load threadedcommentstags %}{% get_free_comment_url topic parent %}').render(c), u'/freecomment/%(ct)s/%(id)s/%(pid)s/' % sc)
self.assertEquals(Template('{% load threadedcommentstags %}{% get_free_comment_url_json topic %}').render(c), u'/freecomment/%(ct)s/%(id)s/json/' % sc)
self.assertEquals(Template('{% load threadedcommentstags %}{% get_free_comment_url_xml topic %}').render(c), u'/freecomment/%(ct)s/%(id)s/xml/' % sc)
self.assertEquals(Template('{% load threadedcommentstags %}{% get_free_comment_url_json topic parent %}').render(c), u'/freecomment/%(ct)s/%(id)s/%(pid)s/json/' % sc)
self.assertEquals(Template('{% load threadedcommentstags %}{% get_free_comment_url_xml topic parent %}').render(c), u'/freecomment/%(ct)s/%(id)s/%(pid)s/xml/' % sc)
def test_get_comment_count(self):
user = User.objects.create_user('user', 'floguy@gmail.com', password='password')
topic = TestModel.objects.create(name="Test2")
comment = ThreadedComment.objects.create_for_object(topic,
user = user,
ip_address = '127.0.0.1',
comment = "My test comment!",
)
c = Context({
'topic': topic,
})
self.assertEquals(
Template('{% load threadedcommentstags %}{% get_comment_count for topic as count %}{{ count }}').render(c),
u'1'
)
def test_get_free_comment_count(self):
topic = TestModel.objects.create(name="Test2")
comment = FreeThreadedComment.objects.create_for_object(topic,
ip_address = '127.0.0.1',
comment = "My test free comment!",
)
c = Context({
'topic': topic,
})
self.assertEquals(
Template('{% load threadedcommentstags %}{% get_free_comment_count for topic as count %}{{ count }}').render(c),
u'1'
)
def test_get_threaded_comment_form(self):
with self.settings(LANGUAGE_CODE='en'):
template_string = """
{% load threadedcommentstags %}
{% get_threaded_comment_form as form %}
{{ form }}
"""
self.assertIn(
'textarea',
Template(template_string).render(Context({})))
def test_get_latest_comments(self):
user = User.objects.create_user('user', 'floguy@gmail.com', password='password')
topic = TestModel.objects.create(name="Test2")
old_topic = topic
content_type = ContentType.objects.get_for_model(topic)
ThreadedComment.objects.create_for_object(topic,
user = user,
ip_address = '127.0.0.1',
comment = "Test 1",
)
ThreadedComment.objects.create_for_object(topic,
user = user,
ip_address = '127.0.0.1',
comment = "Test 2",
)
ThreadedComment.objects.create_for_object(topic,
user = user,
ip_address = '127.0.0.1',
comment = "Test 3",
)
self.assertEquals(
Template('{% load threadedcommentstags %}{% get_latest_comments 2 as comments %}{{ comments }}').render(Context({})),
u'[<ThreadedComment: Test 3>, <ThreadedComment: Test 2>]'
)
def test_get_latest_free_comments(self):
topic = TestModel.objects.create(name="Test2")
FreeThreadedComment.objects.create_for_object(topic,
ip_address = '127.0.0.1',
comment = "Test 1",
)
FreeThreadedComment.objects.create_for_object(topic,
ip_address = '127.0.0.1',
comment = "Test 2",
)
FreeThreadedComment.objects.create_for_object(topic,
ip_address = '127.0.0.1',
comment = "Test 3",
)
self.assertEquals(
Template('{% load threadedcommentstags %}{% get_latest_free_comments 2 as comments %}{{ comments }}').render(Context({})),
u'[<FreeThreadedComment: Test 3>, <FreeThreadedComment: Test 2>]'
)
def test_user_comment_tags(self):
user1 = User.objects.create_user('eric', 'floguy@gmail.com', password='password')
user2 = User.objects.create_user('brian', 'brosner@gmail.com', password='password')
topic = TestModel.objects.create(name="Test2")
ThreadedComment.objects.create_for_object(topic,
user = user1,
ip_address = '127.0.0.1',
comment = "Eric comment",
)
ThreadedComment.objects.create_for_object(topic,
user = user2,
ip_address = '127.0.0.1',
comment = "Brian comment",
)
c = Context({
'user': user1,
})
self.assertEquals(
Template('{% load threadedcommentstags %}{% get_user_comments for user as comments %}{{ comments }}').render(c),
u'[<ThreadedComment: Eric comment>]'
)
self.assertEquals(
Template('{% load threadedcommentstags %}{% get_user_comment_count for user as comment_count %}{{ comment_count }}').render(c),
u'1',
)
def test_markdown_comment(self):
user = User.objects.create_user('user', 'floguy@gmail.com', password='password')
topic = TestModel.objects.create(name="Test2")
markdown_txt = '''
A First Level Header
====================
A Second Level Header
---------------------
Now is the time for all good men to come to
the aid of their country. This is just a
regular paragraph.
The quick brown fox jumped over the lazy
dog's back.
### Header 3
> This is a blockquote.
>
> This is the second paragraph in the blockquote.
>
> ## This is an H2 in a blockquote
'''
comment_markdown = ThreadedComment.objects.create_for_object(
topic, user = user, ip_address = '127.0.0.1', markup = MARKDOWN,
comment = markdown_txt,
)
c = Context({
'comment': comment_markdown,
})
s = Template("{% load threadedcommentstags %}{% auto_transform_markup comment %}").render(c).replace('\\n', '')
self.assertEquals(s.startswith(u"<h1>"), True)
def test_plaintext_comment(self):
user = User.objects.create_user('user', 'floguy@gmail.com', password='password')
topic = TestModel.objects.create(name="Test2")
comment_plaintext = ThreadedComment.objects.create_for_object(
topic, user = user, ip_address = '127.0.0.1', markup = PLAINTEXT,
comment = '<b>This is Funny</b>',
)
c = Context({
'comment': comment_plaintext
})
self.assertEquals(
Template("{% load threadedcommentstags %}{% auto_transform_markup comment %}").render(c),
u'<b>This is Funny</b>'
)
comment_plaintext = ThreadedComment.objects.create_for_object(
topic, user = user, ip_address = '127.0.0.1', markup = PLAINTEXT,
comment = '<b>This is Funny</b>',
)
c = Context({
'comment': comment_plaintext
})
self.assertEquals(
Template("{% load threadedcommentstags %}{% auto_transform_markup comment as abc %}{{ abc }}").render(c),
u'<b>This is Funny</b>'
)
def test_gravatar_tags(self):
c = Context({
'email': "floguy@gmail.com",
'rating': "G",
'size': 30,
'default': 'overridectx',
})
self.assertEquals(
Template('{% load gravatar %}{% get_gravatar_url for email %}').render(c),
u'http://www.gravatar.com/avatar.php?gravatar_id=04d6b8e8d3c68899ac88eb8623392150&rating=R&size=80&default=img%3Ablank'
)
self.assertEquals(
Template('{% load gravatar %}{% get_gravatar_url for email as var %}Var: {{ var }}').render(c),
u'Var: http://www.gravatar.com/avatar.php?gravatar_id=04d6b8e8d3c68899ac88eb8623392150&rating=R&size=80&default=img%3Ablank'
)
self.assertEquals(
Template('{% load gravatar %}{% get_gravatar_url for email size 30 rating "G" default override as var %}Var: {{ var }}').render(c),
u'Var: http://www.gravatar.com/avatar.php?gravatar_id=04d6b8e8d3c68899ac88eb8623392150&rating=G&size=30&default=override'
)
self.assertEquals(
Template('{% load gravatar %}{% get_gravatar_url for email size size rating rating default default as var %}Var: {{ var }}').render(c),
u'Var: http://www.gravatar.com/avatar.php?gravatar_id=04d6b8e8d3c68899ac88eb8623392150&rating=G&size=30&default=overridectx'
)
self.assertEquals(
Template('{% load gravatar %}{{ email|gravatar }}').render(c),
u'http://www.gravatar.com/avatar.php?gravatar_id=04d6b8e8d3c68899ac88eb8623392150&rating=R&size=80&default=img%3Ablank'
)
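# A hedged illustration (not part of the test suite) of how the tags exercised
# above are used in a Django template; `topic` and `parent` are assumed to be
# context variables as in the tests:
#
#   {% load threadedcommentstags %}
#   <a href="{% get_comment_url topic parent %}">Reply</a>
#   {% get_comment_count for topic as count %}
#   {{ count }} comment{{ count|pluralize }}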
|
amarandon/smeuhsocial
|
apps/threadedcomments/tests/test_templatetags.py
|
Python
|
mit
| 12,278
|
[
"Brian"
] |
30310a91155124862efcfa14a475c5b07a51add3bfa6206ac8c6f4a50cd60d79
|
#-*- coding: utf-8 -*-
""" 通过某个关键字排序一个字典列表 """
from operator import itemgetter
rows = [
{'fname': 'Brian', 'lname': 'Jones', 'uid': 1003},
{'fname': 'David', 'lname': 'Beazley', 'uid': 1002},
{'fname': 'John', 'lname': 'Cleese', 'uid': 1002},
{'fname': 'Big', 'lname': 'Jones', 'uid': 1004}
]
rows_by_fname = sorted(rows, key=itemgetter('fname'))
rows_by_uid = sorted(rows, key=itemgetter('uid'))
rows_by_uid_fname = sorted(rows, key=itemgetter('uid', 'fname'))
print("sorted by fname: ")
print(rows_by_fname)
print("sorted by uid: ")
print(rows_by_uid)
print("sorted by uid & fname: ")
print(rows_by_uid_fname)
print("max by uid: ")
print(max(rows, key=itemgetter('uid')))
print("min by fname: ")
print(min(rows, key=itemgetter('lname')))
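# For comparison, a hedged sketch of the same orderings with lambdas;
# itemgetter is typically faster and, as shown above, supports multiple keys.
rows_by_fname_lambda = sorted(rows, key=lambda r: r['fname'])
rows_by_uid_fname_lambda = sorted(rows, key=lambda r: (r['uid'], r['fname']))
assert rows_by_fname_lambda == rows_by_fname
assert rows_by_uid_fname_lambda == rows_by_uid_fname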
|
Jackson-Y/Machine-Learning
|
exercise/sort_dict_List_by_key.py
|
Python
|
mit
| 788
|
[
"Brian"
] |
ebfeb8dad7a7e4e3c08a9a3cbdc579f84233d7c467b42a3bbad78a78fff615a7
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2006, 2010 Hervé Cauwelier <herve@oursours.net>
# Copyright (C) 2006-2007, 2009-2011 J. David Ibáñez <jdavid.ibp@gmail.com>
# Copyright (C) 2007 Sylvain Taverne <taverne.sylvain@gmail.com>
# Copyright (C) 2009 David Versmisse <versmisse@lil.univ-littoral.fr>
# Copyright (C) 2009 Dumont Sébastien <sebastien.dumont@itaapy.com>
# Copyright (C) 2016 Sylvain Taverne <taverne.sylvain@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from standard library
from copy import deepcopy
from os.path import islink, exists, isdir
from subprocess import Popen
from json import dumps
# Import from itools
from itools.gettext import POFile
# Import from here
from build_gulp import GulpBuilder
from git import open_worktree
def get_manifest():
worktree = open_worktree('.')
return [ x for x in worktree.get_filenames() if not x.startswith('.')]
def ipkg_build(worktree, manifest, config):
from itools.html import XHTMLFile, HTMLFile
package_root = config.get_value('package_root')
rules = [('.po', '.mo', po2mo, None)]
# Pre-load PO files
po_files = {}
for dst_lang in config.get_value('target_languages'):
po = POFile('%s/locale/%s.po' % (package_root, dst_lang))
po_files[dst_lang] = po
# Templates
src_lang = config.get_value('source_language', default='en')
for dst_lang in config.get_value('target_languages'):
rules.append(
('.xml.%s' % src_lang, '.xml.%s' % dst_lang, make_template, XHTMLFile))
rules.append(
('.xhtml.%s' % src_lang, '.xhtml.%s' % dst_lang, make_template, XHTMLFile))
rules.append(
('.html.%s' % src_lang, '.html.%s' % dst_lang, make_template, HTMLFile))
# Make
make(worktree, rules, manifest, package_root, po_files)
def make(worktree, rules, manifest, package_root, po_files):
for source in deepcopy(manifest):
# Exclude
if 'docs/' in source:
continue
# Apply rules
for source_ext, target_ext, f, handler_cls in rules:
if source.endswith(source_ext):
target = source[:-len(source_ext)] + target_ext
print(target)
# Compile
f(package_root, source, target, handler_cls, po_files)
# Update manifest
manifest.add(target)
# PO => MO
def po2mo(package_root, source, target, handler_cls, po_files):
Popen(['msgfmt', source, '-o', target])
# Translate templates
def make_template(package_root, source, target, handler_cls, po_files):
# Import some packages so we can compile templates
from itools.xmlfile.errors import TranslationError
import itools.gettext
# Get file
source_handler = handler_cls(source)
language = target.rsplit('.', 1)[1]
po = po_files[language]
try:
data = source_handler.translate(po)
except TranslationError as e:
# Override source and language
raise TranslationError(line=e.line, source_file=source, language=language)
with open(target, 'w') as f:
f.write(data)
def get_file_path(package_root, filename):
if package_root == '.':
return filename
return package_root + '/' + filename
def get_package_version(package_root):
path = get_file_path(package_root, 'version.txt')
if exists(path):
version = open(path).read().strip()
else:
version = None
return version
def make_version(worktree):
"""This function finds out the version number from the source, this will
be written to the 'version.txt' file, which will be read once the software
is installed to get the version number.
"""
# Get the git description
tag = None
description = worktree.git_describe()
# The version name
if description:
# n represents the number of commits between the tag and the ref
tag, n, commit = description
if n == 0:
# Exact match
return tag
# Try to get the branch
branch = worktree.get_branch_name()
branch = branch or 'nobranch'
if tag and tag.startswith(branch):
branch = tag
# Get the timestamp
try:
head = worktree.get_metadata()
timestamp = head['committer_date']
timestamp = timestamp.strftime('%Y%m%d%H%M')
except KeyError:
# XXX bug in docker ?
timestamp = 'notimestamp'
# Build a version from the branch and the timestamp
return '{}.dev{}'.format(branch, timestamp)
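# A hedged worked example of the scheme above (values are illustrative):
# with git_describe() returning ('0.77.0', 3, 'abc123') on branch 'master'
# and a committer date of 2017-01-05 10:42, this yields
# 'master.dev201701051042'; with n == 0 the tag '0.77.0' is returned as-is.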
def build(path, config, environment):
# Get version path
package_root = config.get_value('package_root')
version_txt = get_file_path(package_root, 'version.txt')
# Get git worktree
try:
worktree = open_worktree(path)
except KeyError:
worktree = None
# If not in a git repository, get the package version
if worktree is None:
return get_package_version(package_root)
# Find out the version string
version = make_version(worktree)
# Initialize the manifest file (ignore links & submodules)
manifest = set([ x for x in get_manifest() if not islink(x) and not isdir(x)])
manifest.add('MANIFEST')
# Write version
open(path + version_txt, 'w').write(version)
print('**' * 30)
print('* Version:', version)
manifest.add(version_txt)
# Write environment.json file
environment_json = get_file_path(package_root, 'environment.json')
environment_kw = {'build_path': path, 'environment': environment}
open(path + environment_json, 'w').write(dumps(environment_kw))
manifest.add(environment_json)
print('* Build environment.json')
# Run gulp
if environment == 'production':
gulp_builder = GulpBuilder(package_root, worktree, manifest)
gulp_builder.run()
# Rules
ipkg_build(worktree, manifest, config)
# Write the manifest
lines = [ x + '\n' for x in sorted(manifest) ]
open(path + 'MANIFEST', 'w').write(''.join(lines))
print('* Build MANIFEST file (list of files to install)')
print('**' * 30)
return version
|
hforge/itools
|
itools/pkg/build.py
|
Python
|
gpl-3.0
| 6,702
|
[
"GULP"
] |
54ea42de6571be6ed95a1ac3d67e12d6b069873173bbf0535fbe8baaf1bfe040
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""Python code format's checker.
By default try to follow Guido's style guide :
http://www.python.org/doc/essays/styleguide.html
Some parts of the process_token method are based on the Tab Nanny std module.
"""
import re, sys
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
from logilab.common.textutils import pretty_match
from logilab.astng import nodes
from pylint.interfaces import IRawChecker, IASTNGChecker
from pylint.checkers import BaseRawChecker
from pylint.checkers.utils import check_messages
from pylint.utils import WarningScope
MSGS = {
'C0301': ('Line too long (%s/%s)',
'line-too-long',
'Used when a line is longer than a given number of characters.'),
'C0302': ('Too many lines in module (%s)', # was W0302
'too-many-lines',
'Used when a module has too many lines, reducing its readability.'
),
'W0311': ('Bad indentation. Found %s %s, expected %s',
'bad-indentation',
'Used when an unexpected number of tabs or spaces has been '
'found in the indentation.'),
'W0312': ('Found indentation with %ss instead of %ss',
'mixed-indentation',
'Used when there are some mixed tabs and spaces in a module.'),
'W0301': ('Unnecessary semicolon', # was W0106
'unnecessary-semicolon',
'Used when a statement is ended by a semi-colon (";"), which \
isn\'t necessary (that\'s python, not C ;).'),
'C0321': ('More than one statement on a single line',
'multiple-statements',
'Used when more than one statement is found on the same line.',
{'scope': WarningScope.NODE}),
'C0322': ('Operator not preceded by a space\n%s',
'no-space-before-operator',
'Used when one of the following operators (!= | <= | == | >= | < '
'| > | = | \\+= | -= | \\*= | /= | %) is not preceded by a space.',
{'scope': WarningScope.NODE}),
'C0323': ('Operator not followed by a space\n%s',
'no-space-after-operator',
'Used when one of the following operators (!= | <= | == | >= | < '
'| > | = | \\+= | -= | \\*= | /= | %) is not followed by a space.',
{'scope': WarningScope.NODE}),
'C0324': ('Comma not followed by a space\n%s',
'no-space-after-comma',
'Used when a comma (",") is not followed by a space.',
{'scope': WarningScope.NODE}),
}
if sys.version_info < (3, 0):
MSGS.update({
'W0331': ('Use of the <> operator',
'old-ne-operator',
'Used when the deprecated "<>" operator is used instead \
of "!=".'),
'W0332': ('Use of "l" as long integer identifier',
'lowercase-l-suffix',
'Used when a lower case "l" is used to mark a long integer. You '
'should use a upper case "L" since the letter "l" looks too much '
'like the digit "1"'),
'W0333': ('Use of the `` operator',
'backtick',
'Used when the deprecated "``" (backtick) operator is used '
'instead of the str() function.',
{'scope': WarningScope.NODE}),
})
# simple quoted string rgx
SQSTRING_RGX = r'"([^"\\]|\\.)*?"'
# simple apostrophed rgx
SASTRING_RGX = r"'([^'\\]|\\.)*?'"
# triple quoted string rgx
TQSTRING_RGX = r'"""([^"]|("(?!"")))*?(""")'
# triple apostrophe'd string rgx
TASTRING_RGX = r"'''([^']|('(?!'')))*?(''')"
# finally, the string regular expression
STRING_RGX = re.compile('(%s)|(%s)|(%s)|(%s)' % (TQSTRING_RGX, TASTRING_RGX,
SQSTRING_RGX, SASTRING_RGX),
re.MULTILINE|re.DOTALL)
COMMENT_RGX = re.compile("#.*$", re.M)
OPERATORS = r'!=|<=|==|>=|<|>|=|\+=|-=|\*=|/=|%'
OP_RGX_MATCH_1 = r'[^(]*(?<!\s|\^|<|>|=|\+|-|\*|/|!|%%|&|\|)(%s).*' % OPERATORS
OP_RGX_SEARCH_1 = r'(?<!\s|\^|<|>|=|\+|-|\*|/|!|%%|&|\|)(%s)' % OPERATORS
OP_RGX_MATCH_2 = r'[^(]*(%s)(?!\s|=|>|<).*' % OPERATORS
OP_RGX_SEARCH_2 = r'(%s)(?!\s|=|>)' % OPERATORS
BAD_CONSTRUCT_RGXS = (
(re.compile(OP_RGX_MATCH_1, re.M),
re.compile(OP_RGX_SEARCH_1, re.M),
'C0322'),
(re.compile(OP_RGX_MATCH_2, re.M),
re.compile(OP_RGX_SEARCH_2, re.M),
'C0323'),
(re.compile(r'.*,[^(\s|\]|}|\))].*', re.M),
re.compile(r',[^\s)]', re.M),
'C0324'),
)
def get_string_coords(line):
"""return a list of string positions (tuple (start, end)) in the line
"""
result = []
for match in re.finditer(STRING_RGX, line):
result.append( (match.start(), match.end()) )
return result
def in_coords(match, string_coords):
"""return true if the match is in the string coord"""
mstart = match.start()
for start, end in string_coords:
if mstart >= start and mstart < end:
return True
return False
def check_line(line):
"""check a line for a bad construction
if it finds one, return a message describing the problem
else return None
"""
cleanstr = COMMENT_RGX.sub('', STRING_RGX.sub('', line))
for rgx_match, rgx_search, msg_id in BAD_CONSTRUCT_RGXS:
if rgx_match.match(cleanstr):
string_positions = get_string_coords(line)
for match in re.finditer(rgx_search, line):
if not in_coords(match, string_positions):
return msg_id, pretty_match(match, line.rstrip())
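# A hedged illustration of check_line (not in the original module); the second
# tuple element is the pretty-printed match:
#
#   check_line('x=1')   # -> ('C0322', ...)  '=' not preceded by a space
#   check_line('x =1')  # -> ('C0323', ...)  '=' not followed by a space
#   check_line('x = 1') # -> None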
class FormatChecker(BaseRawChecker):
"""checks for :
* unauthorized constructions
* strict indentation
* line length
* use of <> instead of !=
"""
__implements__ = (IRawChecker, IASTNGChecker)
# configuration section name
name = 'format'
# messages
msgs = MSGS
# configuration options
# for available dict keys/values see the optik parser 'add_option' method
options = (('max-line-length',
{'default' : 80, 'type' : "int", 'metavar' : '<int>',
'help' : 'Maximum number of characters on a single line.'}),
('max-module-lines',
{'default' : 1000, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of lines in a module'}
),
('indent-string',
{'default' : ' ', 'type' : "string", 'metavar' : '<string>',
'help' : 'String used as indentation unit. This is usually \
" " (4 spaces) or "\\t" (1 tab).'}),
)
def __init__(self, linter=None):
BaseRawChecker.__init__(self, linter)
self._lines = None
self._visited_lines = None
def process_module(self, node):
"""extracts encoding from the stream and decodes each line, so that
international text's length is properly calculated.
"""
stream = node.file_stream
stream.seek(0) # XXX may be removed with astng > 0.23
readline = stream.readline
if sys.version_info < (3, 0):
if node.file_encoding is not None:
readline = lambda: stream.readline().decode(node.file_encoding, 'replace')
self.process_tokens(tokenize.generate_tokens(readline))
def new_line(self, tok_type, line, line_num, junk):
"""a new line has been encountered, process it if necessary"""
if not tok_type in junk:
self._lines[line_num] = line.split('\n')[0]
self.check_lines(line, line_num)
def process_tokens(self, tokens):
"""process tokens and search for :
_ non strict indentation (i.e. not always using the <indent> parameter as
indent unit)
_ too long lines (i.e. longer than <max_chars>)
_ optionally bad construct (if given, bad_construct must be a compiled
regular expression).
"""
indent = tokenize.INDENT
dedent = tokenize.DEDENT
newline = tokenize.NEWLINE
junk = (tokenize.COMMENT, tokenize.NL)
indents = [0]
check_equal = 0
line_num = 0
previous = None
self._lines = {}
self._visited_lines = {}
for (tok_type, token, start, _, line) in tokens:
if start[0] != line_num:
if previous is not None and previous[0] == tokenize.OP and previous[1] == ';':
self.add_message('W0301', line=previous[2])
previous = None
line_num = start[0]
self.new_line(tok_type, line, line_num, junk)
if tok_type not in (indent, dedent, newline) + junk:
previous = tok_type, token, start[0]
if tok_type == tokenize.OP:
if token == '<>':
self.add_message('W0331', line=line_num)
elif tok_type == tokenize.NUMBER:
if token.endswith('l'):
self.add_message('W0332', line=line_num)
elif tok_type == newline:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = 1
elif tok_type == indent:
check_equal = 0
self.check_indent_level(token, indents[-1]+1, line_num)
indents.append(indents[-1]+1)
elif tok_type == dedent:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
check_equal = 1
if len(indents) > 1:
del indents[-1]
elif check_equal and tok_type not in junk:
# this is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
check_equal = 0
self.check_indent_level(line, indents[-1], line_num)
line_num -= 1 # to be ok with "wc -l"
if line_num > self.config.max_module_lines:
self.add_message('C0302', args=line_num, line=1)
@check_messages('C0321', 'C0322', 'C0323', 'C0324')
def visit_default(self, node):
"""check the node line number and check it if not yet done"""
if not node.is_statement:
return
if not node.root().pure_python:
return # XXX block visit of child nodes
prev_sibl = node.previous_sibling()
if prev_sibl is not None:
prev_line = prev_sibl.fromlineno
else:
prev_line = node.parent.statement().fromlineno
line = node.fromlineno
assert line, node
if prev_line == line and self._visited_lines.get(line) != 2:
# py2.5 try: except: finally:
if not (isinstance(node, nodes.TryExcept)
and isinstance(node.parent, nodes.TryFinally)
and node.fromlineno == node.parent.fromlineno):
self.add_message('C0321', node=node)
self._visited_lines[line] = 2
return
if line in self._visited_lines:
return
try:
tolineno = node.blockstart_tolineno
except AttributeError:
tolineno = node.tolineno
assert tolineno, node
lines = []
for line in range(line, tolineno + 1):
self._visited_lines[line] = 1
try:
lines.append(self._lines[line].rstrip())
except KeyError:
lines.append('')
try:
msg_def = check_line('\n'.join(lines))
if msg_def:
self.add_message(msg_def[0], node=node, args=msg_def[1])
except KeyError:
# FIXME: internal error !
pass
@check_messages('W0333')
def visit_backquote(self, node):
self.add_message('W0333', node=node)
def check_lines(self, lines, i):
"""check lines have less than a maximum number of characters
"""
max_chars = self.config.max_line_length
for line in lines.splitlines():
if len(line) > max_chars:
self.add_message('C0301', line=i, args=(len(line), max_chars))
i += 1
def check_indent_level(self, string, expected, line_num):
"""return the indent level of the string
"""
indent = self.config.indent_string
if indent == '\\t': # \t is not interpreted in the configuration file
indent = '\t'
level = 0
unit_size = len(indent)
while string[:unit_size] == indent:
string = string[unit_size:]
level += 1
suppl = ''
while string and string[0] in ' \t':
if string[0] != indent[0]:
if string[0] == '\t':
args = ('tab', 'space')
else:
args = ('space', 'tab')
self.add_message('W0312', args=args, line=line_num)
return level
suppl += string[0]
string = string[1:]
if level != expected or suppl:
i_type = 'spaces'
if indent[0] == '\t':
i_type = 'tabs'
self.add_message('W0311', line=line_num,
args=(level * unit_size + len(suppl), i_type,
expected * unit_size))
def register(linter):
"""required method to auto register this checker """
linter.register_checker(FormatChecker(linter))
|
tlksio/tlksio
|
env/lib/python3.4/site-packages/pylint/checkers/format.py
|
Python
|
mit
| 15,049
|
[
"VisIt"
] |
4d7919796e01d7de5374e7b96dea1b68db3682f821a37e694d0f61701995dd08
|
""" Classes and functions for Symmetric Diffeomorphic Registration """
import logging
import abc
import numpy as np
import numpy.linalg as npl
import nibabel as nib
from dipy.align import vector_fields as vfu
from dipy.align import floating
from dipy.align import VerbosityLevels
from dipy.align import Bunch
from dipy.align.scalespace import ScaleSpace
RegistrationStages = Bunch(INIT_START=0,
INIT_END=1,
OPT_START=2,
OPT_END=3,
SCALE_START=4,
SCALE_END=5,
ITER_START=6,
ITER_END=7)
"""Registration Stages
This enum defines the different stages which the Volumetric Registration
may be in. The value of the stage is passed as a parameter to the call-back
function so that it can react accordingly.
INIT_START: optimizer initialization starts
INIT_END: optimizer initialization ends
OPT_START: optimization starts
OPT_END: optimization ends
SCALE_START: optimization at a new scale space resolution starts
SCALE_END: optimization at the current scale space resolution ends
ITER_START: a new iteration starts
ITER_END: the current iteration ends
"""
logger = logging.getLogger(__name__)
def mult_aff(A, B):
"""Returns the matrix product A.dot(B) considering None as the identity
Parameters
----------
A : array, shape (n,k)
B : array, shape (k,m)
Returns
-------
The matrix product A.dot(B). If any of the input matrices is None, it is
treated as the identity matrix. If both matrices are None, None is returned
"""
if A is None:
return B
elif B is None:
return A
return A.dot(B)
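# Hedged doctest-style examples (not in the original source):
#
#   >>> A = np.diag([2.0, 2.0, 1.0])
#   >>> mult_aff(A, None) is A
#   True
#   >>> mult_aff(None, None) is None
#   True
#   >>> np.allclose(mult_aff(A, np.eye(3)), A)
#   True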
def get_direction_and_spacings(affine, dim):
"""Extracts the rotational and spacing components from a matrix
Extracts the rotational and spacing (voxel dimensions) components from a
matrix. An image gradient represents the local variation of the image's
gray values per voxel. Since we are iterating on the physical space, we
need to compute the gradients as variation per millimeter, so we need to
divide each gradient's component by the voxel size along the corresponding
axis, that's what the spacings are used for. Since the image's gradients
are oriented along the grid axes, we also need to re-orient the gradients
to be given in physical space coordinates.
Parameters
----------
affine : array, shape (k, k), k = 3, 4
the matrix transforming grid coordinates to physical space.
Returns
-------
direction : array, shape (k-1, k-1)
the rotational component of the input matrix
spacings : array, shape (k-1,)
the scaling component (voxel size) of the matrix
"""
if affine is None:
return np.eye(dim), np.ones(dim)
dim = affine.shape[1]-1
# Temporary hack: get the zooms by building a nifti image
affine4x4 = np.eye(4)
empty_volume = np.zeros((0, 0, 0))
affine4x4[:dim, :dim] = affine[:dim, :dim]
affine4x4[:dim, 3] = affine[:dim, dim-1]
nib_nifti = nib.Nifti1Image(empty_volume, affine4x4)
scalings = np.asarray(nib_nifti.header.get_zooms())
scalings = np.asarray(scalings[:dim], dtype=np.float64)
A = affine[:dim, :dim]
return A.dot(np.diag(1.0/scalings)), scalings
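# A hedged sketch of the decomposition above: for a purely diagonal affine the
# rotational part is the identity and the spacings are the diagonal entries.
#
#   >>> aff = np.diag([2.0, 3.0, 4.0, 1.0])
#   >>> direction, spacings = get_direction_and_spacings(aff, 3)
#   >>> np.allclose(direction, np.eye(3)), np.allclose(spacings, [2, 3, 4])
#   (True, True)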
class DiffeomorphicMap(object):
def __init__(self,
dim,
disp_shape,
disp_grid2world=None,
domain_shape=None,
domain_grid2world=None,
codomain_shape=None,
codomain_grid2world=None,
prealign=None):
""" DiffeomorphicMap
Implements a diffeomorphic transformation on the physical space. The
deformation fields encoding the direct and inverse transformations
share the same domain discretization (both the discretization grid
shape and voxel-to-space matrix). The input coordinates (physical
coordinates) are first aligned using prealign, and then displaced
using the corresponding vector field interpolated at the aligned
coordinates.
Parameters
----------
dim : int, 2 or 3
the transformation's dimension
disp_shape : array, shape (dim,)
the number of slices (if 3D), rows and columns of the deformation
field's discretization
disp_grid2world : the voxel-to-space transform between the def. fields
grid and space
domain_shape : array, shape (dim,)
the number of slices (if 3D), rows and columns of the default
discretization of this map's domain
domain_grid2world : array, shape (dim+1, dim+1)
the default voxel-to-space transformation between this map's
discretization and physical space
codomain_shape : array, shape (dim,)
the number of slices (if 3D), rows and columns of the images that
are 'normally' warped using this transformation in the forward
direction (this will provide default transformation parameters to
warp images under this transformation). By default, we assume that
the inverse transformation is 'normally' used to warp images with
the same discretization and voxel-to-space transformation as the
deformation field grid.
codomain_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of images that are 'normally'
warped using this transformation (in the forward direction).
prealign : array, shape (dim+1, dim+1)
the linear transformation to be applied to align input images to
the reference space before warping under the deformation field.
"""
self.dim = dim
if(disp_shape is None):
raise ValueError("Invalid displacement field discretization")
self.disp_shape = np.asarray(disp_shape, dtype=np.int32)
# If the discretization affine is None, we assume it's the identity
self.disp_grid2world = disp_grid2world
if(self.disp_grid2world is None):
self.disp_world2grid = None
else:
self.disp_world2grid = npl.inv(self.disp_grid2world)
# If domain_shape isn't provided, we use the map's discretization shape
if(domain_shape is None):
self.domain_shape = self.disp_shape
else:
self.domain_shape = np.asarray(domain_shape, dtype=np.int32)
self.domain_grid2world = domain_grid2world
if(domain_grid2world is None):
self.domain_world2grid = None
else:
self.domain_world2grid = npl.inv(domain_grid2world)
# If codomain shape was not provided, we assume it is an endomorphism:
# use the same domain_shape and codomain_grid2world as the field domain
if codomain_shape is None:
self.codomain_shape = self.domain_shape
else:
self.codomain_shape = np.asarray(codomain_shape, dtype=np.int32)
self.codomain_grid2world = codomain_grid2world
if codomain_grid2world is None:
self.codomain_world2grid = None
else:
self.codomain_world2grid = npl.inv(codomain_grid2world)
self.prealign = prealign
if prealign is None:
self.prealign_inv = None
else:
self.prealign_inv = npl.inv(prealign)
self.is_inverse = False
self.forward = None
self.backward = None
def interpret_matrix(self, obj):
""" Try to interpret `obj` as a matrix
Some operations are performed faster if we know in advance if a matrix
is the identity (so we can skip the actual matrix-vector
multiplication). This function returns None if the given object
is None or the 'identity' string. It returns the same object if it is
a numpy array. It raises an exception otherwise.
Parameters
----------
obj : object
any object
Returns
-------
obj : object
the same object given as argument if `obj` is None or a numpy
array. None if `obj` is the 'identity' string.
"""
if (obj is None) or isinstance(obj, np.ndarray):
return obj
if isinstance(obj, str) and (obj == 'identity'):
return None
raise ValueError('Invalid matrix')
def get_forward_field(self):
"""Deformation field to transform an image in the forward direction
Returns the deformation field that must be used to warp an image under
this transformation in the forward direction (note the 'is_inverse'
flag).
"""
if self.is_inverse:
return self.backward
else:
return self.forward
def get_backward_field(self):
"""Deformation field to transform an image in the backward direction
Returns the deformation field that must be used to warp an image under
this transformation in the backward direction (note the 'is_inverse'
flag).
"""
if self.is_inverse:
return self.forward
else:
return self.backward
def allocate(self):
"""Creates a zero displacement field
Creates a zero displacement field (the identity transformation).
"""
self.forward = np.zeros(tuple(self.disp_shape) + (self.dim,),
dtype=floating)
self.backward = np.zeros(tuple(self.disp_shape) + (self.dim,),
dtype=floating)
def _get_warping_function(self, interpolation):
"""Appropriate warping function for the given interpolation type
Returns the right warping function from vector_fields that must be
called for the specified data dimension and interpolation type
"""
if self.dim == 2:
if interpolation == 'linear':
return vfu.warp_2d
else:
return vfu.warp_2d_nn
else:
if interpolation == 'linear':
return vfu.warp_3d
else:
return vfu.warp_3d_nn
def _warp_forward(self, image, interpolation='linear',
image_world2grid=None, out_shape=None,
out_grid2world=None):
"""Warps an image in the forward direction
Deforms the input image under this diffeomorphic map in the forward
direction. Since the mapping is defined in the physical space, the user
must specify the sampling grid shape and its space-to-voxel mapping.
By default, the transformation will use the discretization information
given at initialization.
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the forward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_world2grid : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
out_shape : array, shape (dim,)
the number of slices, rows, and columns of the desired warped image
out_grid2world : the transformation bringing voxel coordinates of the
warped image to physical space
Returns
-------
warped : array, shape = out_shape or self.codomain_shape if None
the warped image under this transformation in the forward direction
Notes
-----
A diffeomorphic map must be thought of as a mapping between points
in space. Warping an image J towards an image I means transforming
each voxel with (discrete) coordinates i in I to (floating-point) voxel
coordinates j in J. The transformation we consider 'forward' is
precisely mapping coordinates i from the input image to coordinates j
from reference image, which has the effect of warping an image with
reference discretization (typically, the "static image") "towards" an
image with input discretization (typically, the "moving image"). More
precisely, the warped image is produced by the following interpolation:
warped[i] = image[W * forward[Dinv * P * S * i] + W * P * S * i]
where i denotes the coordinates of a voxel in the input grid, W is
the world-to-grid transformation of the image given as input, Dinv
is the world-to-grid transformation of the deformation field
discretization, P is the pre-aligning matrix (transforming input
points to reference points), S is the voxel-to-space transformation of
the sampling grid (see comment below) and forward is the forward
deformation field.
If we want to warp an image, we also must specify on what grid we
want to sample the resulting warped image (the images are considered as
points in space and its representation on a grid depends on its
grid-to-space transform telling us for each grid voxel what point in
space we need to bring via interpolation). So, S is the matrix that
converts the sampling grid (whose shape is given as parameter
'out_shape' ) to space coordinates.
"""
# if no world-to-image transform is provided, we use the codomain info
if image_world2grid is None:
image_world2grid = self.codomain_world2grid
# if no sampling info is provided, we use the domain info
if out_shape is None:
if self.domain_shape is None:
raise ValueError('Unable to infer sampling info. '
'Provide a valid out_shape.')
out_shape = self.domain_shape
else:
out_shape = np.asarray(out_shape, dtype=np.int32)
if out_grid2world is None:
out_grid2world = self.domain_grid2world
W = self.interpret_matrix(image_world2grid)
Dinv = self.disp_world2grid
P = self.prealign
S = self.interpret_matrix(out_grid2world)
# this is the matrix which we need to multiply the voxel coordinates
# to interpolate on the forward displacement field ("in"side the
# 'forward' brackets in the expression above)
affine_idx_in = mult_aff(Dinv, mult_aff(P, S))
# this is the matrix which we need to multiply the voxel coordinates
# to add to the displacement ("out"side the 'forward' brackets in the
# expression above)
affine_idx_out = mult_aff(W, mult_aff(P, S))
# this is the matrix which we need to multiply the displacement vector
# prior to adding to the transformed input point
affine_disp = W
# Convert the data to required types to use the cythonized functions
if interpolation == 'nearest':
if image.dtype is np.dtype('float64') and floating is np.float32:
image = image.astype(floating)
elif image.dtype is np.dtype('int64'):
image = image.astype(np.int32)
else:
image = np.asarray(image, dtype=floating)
warp_f = self._get_warping_function(interpolation)
warped = warp_f(image, self.forward, affine_idx_in, affine_idx_out,
affine_disp, out_shape)
return warped
def _warp_backward(self, image, interpolation='linear',
image_world2grid=None, out_shape=None,
out_grid2world=None):
"""Warps an image in the backward direction
Deforms the input image under this diffeomorphic map in the backward
direction. Since the mapping is defined in the physical space, the user
must specify the sampling grid shape and its space-to-voxel mapping.
By default, the transformation will use the discretization information
given at initialization.
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the backward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_world2grid : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
out_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
out_grid2world : the transformation bringing voxel coordinates of the
warped image to physical space
Returns
-------
warped : array, shape = out_shape or self.domain_shape if None
the warped image under this transformation in the backward
direction
Notes
-----
A diffeomorphic map must be thought of as a mapping between points
in space. Warping an image J towards an image I means transforming
each voxel with (discrete) coordinates i in I to (floating-point) voxel
coordinates j in J. The transformation we consider 'backward' is
precisely mapping coordinates i from the reference grid to coordinates
j from the input image (that's why it's "backward"), which has the
effect of warping the input image (moving) "towards" the reference.
More precisely, the warped image is produced by the following
interpolation:
warped[i] = image[W * Pinv * backward[Dinv * S * i] + W * Pinv * S * i]
where i denotes the coordinates of a voxel in the input grid, W is
the world-to-grid transformation of the image given as input, Dinv
is the world-to-grid transformation of the deformation field
discretization, Pinv is the pre-aligning matrix's inverse (transforming
reference points to input points), S is the grid-to-space
transformation of the sampling grid (see comment below) and backward is
the backward deformation field.
If we want to warp an image, we also must specify on what grid we
want to sample the resulting warped image (the images are considered as
points in space and its representation on a grid depends on its
grid-to-space transform telling us for each grid voxel what point in
space we need to bring via interpolation). So, S is the matrix that
converts the sampling grid (whose shape is given as parameter
'out_shape' ) to space coordinates.
"""
# if no world-to-image transform is provided, we use the domain info
if image_world2grid is None:
image_world2grid = self.domain_world2grid
# if no sampling info is provided, we use the codomain info
if out_shape is None:
if self.codomain_shape is None:
msg = 'Unknown sampling info. Provide a valid out_shape.'
raise ValueError(msg)
out_shape = self.codomain_shape
if out_grid2world is None:
out_grid2world = self.codomain_grid2world
W = self.interpret_matrix(image_world2grid)
Dinv = self.disp_world2grid
Pinv = self.prealign_inv
S = self.interpret_matrix(out_grid2world)
# this is the matrix which we need to multiply the voxel coordinates
# to interpolate on the backward displacement field ("in"side the
# 'backward' brackets in the expression above)
affine_idx_in = mult_aff(Dinv, S)
# this is the matrix which we need to multiply the voxel coordinates
# to add to the displacement ("out"side the 'backward' brackets in the
# expression above)
affine_idx_out = mult_aff(W, mult_aff(Pinv, S))
# this is the matrix which we need to multiply the displacement vector
# prior to adding to the transformed input point
affine_disp = mult_aff(W, Pinv)
if interpolation == 'nearest':
if image.dtype is np.dtype('float64') and floating is np.float32:
image = image.astype(floating)
elif image.dtype is np.dtype('int64'):
image = image.astype(np.int32)
else:
image = np.asarray(image, dtype=floating)
warp_f = self._get_warping_function(interpolation)
warped = warp_f(image, self.backward, affine_idx_in, affine_idx_out,
affine_disp, out_shape)
return warped
def transform(self, image, interpolation='linear', image_world2grid=None,
out_shape=None, out_grid2world=None):
"""Warps an image in the forward direction
Transforms the input image under this transformation in the forward
direction. It uses the "is_inverse" flag to switch between "forward"
and "backward" (if is_inverse is False, then transform(...) warps the
image forwards, else it warps the image backwards).
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the forward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_world2grid : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
out_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
out_grid2world : the transformation bringing voxel coordinates of the
warped image to physical space
Returns
-------
warped : array, shape = out_shape or self.codomain_shape if None
the warped image under this transformation in the forward direction
Notes
-----
See _warp_forward and _warp_backward documentation for further
information.
"""
if out_shape is not None:
out_shape = np.asarray(out_shape, dtype=np.int32)
if self.is_inverse:
warped = self._warp_backward(image, interpolation,
image_world2grid, out_shape,
out_grid2world)
else:
warped = self._warp_forward(image, interpolation, image_world2grid,
out_shape, out_grid2world)
return np.asarray(warped)
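# A hedged usage sketch (not part of the class): given an optimized
# DiffeomorphicMap `mapping`, a moving image is warped onto the static grid
# and static-space labels are mapped back with nearest-neighbor interpolation:
#
#   warped_moving = mapping.transform(moving, interpolation='linear')
#   warped_labels = mapping.transform_inverse(static_labels,
#                                             interpolation='nearest')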
def transform_inverse(self, image, interpolation='linear',
image_world2grid=None, out_shape=None,
out_grid2world=None):
"""Warps an image in the backward direction
Transforms the input image under this transformation in the backward
direction. It uses the "is_inverse" flag to switch between "forward"
and "backward" (if is_inverse is False, then transform_inverse(...)
warps the image backwards, else it warps the image forwards)
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the forward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_world2grid : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
out_shape : array, shape (dim,)
the number of slices, rows, and columns of the desired warped image
out_grid2world : the transformation bringing voxel coordinates of the
warped image to physical space
Returns
-------
warped : array, shape = out_shape or self.codomain_shape if None
warped image under this transformation in the backward direction
Notes
-----
See _warp_forward and _warp_backward documentation for further
information.
"""
if self.is_inverse:
warped = self._warp_forward(image, interpolation, image_world2grid,
out_shape, out_grid2world)
else:
warped = self._warp_backward(image, interpolation,
image_world2grid, out_shape,
out_grid2world)
return np.asarray(warped)
def inverse(self):
"""Inverse of this DiffeomorphicMap instance
Returns a diffeomorphic map object representing the inverse of this
transformation. The internal arrays are not copied but just referenced.
Returns
-------
inv : DiffeomorphicMap object
the inverse of this diffeomorphic map.
"""
inv = DiffeomorphicMap(self.dim,
self.disp_shape,
self.disp_grid2world,
self.domain_shape,
self.domain_grid2world,
self.codomain_shape,
self.codomain_grid2world,
self.prealign)
inv.forward = self.forward
inv.backward = self.backward
inv.is_inverse = True
return inv
def expand_fields(self, expand_factors, new_shape):
"""Expands the displacement fields from current shape to new_shape
Up-samples the discretization of the displacement fields to be of
new_shape shape.
Parameters
----------
expand_factors : array, shape (dim,)
the factors scaling current spacings (voxel sizes) to spacings in
the expanded discretization.
new_shape : array, shape (dim,)
the shape of the arrays holding the up-sampled discretization
"""
if self.dim == 2:
expand_f = vfu.resample_displacement_field_2d
else:
expand_f = vfu.resample_displacement_field_3d
expanded_forward = expand_f(self.forward, expand_factors, new_shape)
expanded_backward = expand_f(self.backward, expand_factors, new_shape)
expand_factors = np.append(expand_factors, [1])
expanded_grid2world = mult_aff(self.disp_grid2world,
np.diag(expand_factors))
expanded_world2grid = npl.inv(expanded_grid2world)
self.forward = expanded_forward
self.backward = expanded_backward
self.disp_shape = new_shape
self.disp_grid2world = expanded_grid2world
self.disp_world2grid = expanded_world2grid
def compute_inversion_error(self):
"""Inversion error of the displacement fields
Estimates the inversion error of the displacement fields by computing
statistics of the residual vectors obtained after composing the forward
and backward displacement fields.
Returns
-------
residual : array, shape (R, C) or (S, R, C)
the displacement field resulting from composing the forward and
backward displacement fields of this transformation (the residual
should be zero for a perfect diffeomorphism)
stats : array, shape (3,)
statistics from the norms of the vectors of the residual
displacement field: maximum, mean and standard deviation
Notes
-----
Since the forward and backward displacement fields have the same
discretization, the final composition is given by
comp[i] = forward[ i + Dinv * backward[i]]
where Dinv is the space-to-grid transformation of the displacement
fields
"""
Dinv = self.disp_world2grid
if self.dim == 2:
compose_f = vfu.compose_vector_fields_2d
else:
compose_f = vfu.compose_vector_fields_3d
residual, stats = compose_f(self.backward, self.forward,
None, Dinv, 1.0, None)
return np.asarray(residual), np.asarray(stats)
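# Hedged usage sketch: a quick post-registration diagnostic, assuming
# `mapping` is an optimized DiffeomorphicMap; stats holds (max, mean, std).
#
#   residual, stats = mapping.compute_inversion_error()
#   print('inversion error max/mean/std: %.4f %.4f %.4f' % tuple(stats))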
def shallow_copy(self):
"""Shallow copy of this DiffeomorphicMap instance
Creates a shallow copy of this diffeomorphic map (the arrays are not
copied but just referenced)
Returns
-------
new_map : DiffeomorphicMap object
the shallow copy of this diffeomorphic map
"""
new_map = DiffeomorphicMap(self.dim,
self.disp_shape,
self.disp_grid2world,
self.domain_shape,
self.domain_grid2world,
self.codomain_shape,
self.codomain_grid2world,
self.prealign)
new_map.forward = self.forward
new_map.backward = self.backward
new_map.is_inverse = self.is_inverse
return new_map
def warp_endomorphism(self, phi):
"""Composition of this DiffeomorphicMap with a given endomorphism
Creates a new DiffeomorphicMap C with the same properties as self and
composes its displacement fields with phi's corresponding fields.
The resulting diffeomorphism is of the form C(x) = phi(self(x)) with
inverse C^{-1}(y) = self^{-1}(phi^{-1}(y)). We assume that phi is an
endomorphism with the same discretization and domain affine as self
to ensure that the composition inherits self's properties (we also
assume that the pre-aligning matrix of phi is None or identity).
Parameters
----------
phi : DiffeomorphicMap object
the endomorphism to be warped by this diffeomorphic map
Returns
-------
composition : the composition of this diffeomorphic map with the
endomorphism given as input
Notes
-----
The problem with our current representation of a DiffeomorphicMap is
that the set of Diffeomorphism that can be represented this way (a
pre-aligning matrix followed by a non-linear endomorphism given as a
displacement field) is not closed under the composition operation.
Supporting a general DiffeomorphicMap class, closed under composition,
may be extremely costly computationally, and the kind of
transformations we actually need for Avants' mid-point algorithm (SyN)
are much simpler.
"""
# Compose the forward deformation fields
d1 = self.get_forward_field()
d2 = phi.get_forward_field()
d1_inv = self.get_backward_field()
d2_inv = phi.get_backward_field()
premult_disp = self.disp_world2grid
if self.dim == 2:
compose_f = vfu.compose_vector_fields_2d
else:
compose_f = vfu.compose_vector_fields_3d
forward, stats = compose_f(d1, d2, None, premult_disp, 1.0, None)
backward, stats = compose_f(d2_inv, d1_inv, None, premult_disp, 1.0,
None)
composition = self.shallow_copy()
composition.forward = forward
composition.backward = backward
return composition
def get_simplified_transform(self):
""" Constructs a simplified version of this Diffeomorhic Map
The simplified version incorporates the pre-align transform, as well as
the domain and codomain affine transforms into the displacement field.
The resulting transformation may be regarded as operating on the
image spaces given by the domain and codomain discretization. As a
result, self.prealign, self.disp_grid2world, self.domain_grid2world and
self.codomain_grid2world will be None (denoting Identity) in the resulting
diffeomorphic map.
"""
if self.dim == 2:
simplify_f = vfu.simplify_warp_function_2d
else:
simplify_f = vfu.simplify_warp_function_3d
# Simplify the forward transform
D = self.domain_grid2world
P = self.prealign
Rinv = self.disp_world2grid
Cinv = self.codomain_world2grid
# this is the matrix which we need to multiply the voxel coordinates
# to interpolate on the forward displacement field ("in"side the
# 'forward' brackets in the expression above)
affine_idx_in = mult_aff(Rinv, mult_aff(P, D))
# this is the matrix which we need to multiply the voxel coordinates
# to add to the displacement ("out"side the 'forward' brackets in the
# expression above)
affine_idx_out = mult_aff(Cinv, mult_aff(P, D))
# this is the matrix which we need to multiply the displacement vector
# prior to adding to the transformed input point
affine_disp = Cinv
new_forward = simplify_f(self.forward, affine_idx_in,
affine_idx_out, affine_disp,
self.domain_shape)
# Simplify the backward transform
C = self.codomain_world2grid
Pinv = self.prealign_inv
Dinv = self.domain_world2grid
affine_idx_in = mult_aff(Rinv, C)
affine_idx_out = mult_aff(Dinv, mult_aff(Pinv, C))
affine_disp = mult_aff(Dinv, Pinv)
new_backward = simplify_f(self.backward, affine_idx_in,
affine_idx_out, affine_disp,
self.codomain_shape)
simplified = DiffeomorphicMap(self.dim,
self.disp_shape,
None,
self.domain_shape,
None,
self.codomain_shape,
None,
None)
simplified.forward = new_forward
simplified.backward = new_backward
return simplified
class DiffeomorphicRegistration(object, metaclass=abc.ABCMeta):
def __init__(self, metric=None):
""" Diffeomorphic Registration
This abstract class defines the interface to be implemented by any
optimization algorithm for diffeomorphic registration.
Parameters
----------
metric : SimilarityMetric object
the object measuring the similarity of the two images. The
registration algorithm will minimize (or maximize) the provided
similarity.
"""
if metric is None:
raise ValueError('The metric cannot be None')
self.metric = metric
self.dim = metric.dim
def set_level_iters(self, level_iters):
"""Sets the number of iterations at each pyramid level
Establishes the maximum number of iterations to be performed at each
level of the Gaussian pyramid, similar to ANTS.
Parameters
----------
level_iters : list
the number of iterations at each level of the Gaussian pyramid.
level_iters[0] corresponds to the coarsest level, level_iters[n-1]
the finest, where n is the length of the list
"""
self.levels = len(level_iters) if level_iters else 0
self.level_iters = level_iters
@abc.abstractmethod
def optimize(self):
"""Starts the metric optimization
This is the main function each specialized class derived from this must
implement. Upon completion, the deformation field must be available
from the forward transformation model.
"""
@abc.abstractmethod
def get_map(self):
"""
Returns the resulting diffeomorphic map after optimization
"""
class SymmetricDiffeomorphicRegistration(DiffeomorphicRegistration):
def __init__(self,
metric,
level_iters=None,
step_length=0.25,
ss_sigma_factor=0.2,
opt_tol=1e-5,
inv_iter=20,
inv_tol=1e-3,
callback=None):
""" Symmetric Diffeomorphic Registration (SyN) Algorithm
Performs the multi-resolution optimization algorithm for non-linear
registration using a given similarity metric.
Parameters
----------
metric : SimilarityMetric object
the metric to be optimized
level_iters : list of int
the number of iterations at each level of the Gaussian Pyramid (the
length of the list defines the number of pyramid levels to be
used)
opt_tol : float
the optimization will stop when the estimated derivative of the
energy profile w.r.t. time falls below this threshold
inv_iter : int
the number of iterations to be performed by the displacement field
inversion algorithm
step_length : float
the length of the maximum displacement vector of the update
displacement field at each iteration
ss_sigma_factor : float
parameter of the scale-space smoothing kernel. For example, the
std. dev. of the kernel will be factor*(2^i) in the isotropic case
where i = 0, 1, ..., n_scales is the scale
inv_tol : float
the displacement field inversion algorithm will stop iterating
when the inversion error falls below this threshold
callback : function(SymmetricDiffeomorphicRegistration)
a function receiving a SymmetricDiffeomorphicRegistration object
to be called after each iteration (this optimizer will call this
function passing self as parameter)
"""
super(SymmetricDiffeomorphicRegistration, self).__init__(metric)
if level_iters is None:
level_iters = [100, 100, 25]
if len(level_iters) == 0:
raise ValueError('The iterations list cannot be empty')
self.set_level_iters(level_iters)
self.step_length = step_length
self.ss_sigma_factor = ss_sigma_factor
self.opt_tol = opt_tol
self.inv_tol = inv_tol
self.inv_iter = inv_iter
self.energy_window = 12
self.energy_list = []
self.full_energy_profile = []
self.verbosity = VerbosityLevels.STATUS
self.callback = callback
self.moving_ss = None
self.static_ss = None
self.static_direction = None
self.moving_direction = None
self.mask0 = metric.mask0
def update(self, current_displacement, new_displacement,
disp_world2grid, time_scaling):
"""Composition of the current displacement field with the given field
Interpolates new displacement at the locations defined by
current_displacement. Equivalently, computes the composition C of the
given displacement fields as C(x) = B(A(x)), where A is
current_displacement and B is new_displacement. This function is
intended to be used with deformation fields of the same sampling
(e.g. to be called by a registration algorithm).
Parameters
----------
current_displacement : array, shape (R', C', 2) or (S', R', C', 3)
the displacement field defining where to interpolate
new_displacement
new_displacement : array, shape (R, C, 2) or (S, R, C, 3)
the displacement field to be warped by current_displacement
disp_world2grid : array, shape (dim+1, dim+1)
the space-to-grid transform associated with the displacements'
grid (we assume that both displacements are discretized over the
same grid)
time_scaling : float
scaling factor applied to new_displacement. The effect may be
interpreted as moving current_displacement along a factor
(`time_scaling`) of new_displacement.
Returns
-------
updated : array, shape (the same as new_displacement)
the warped displacement field
mean_norm : the mean norm of all vectors in current_displacement
"""
sq_field = np.sum((np.array(current_displacement) ** 2), -1)
mean_norm = np.sqrt(sq_field).mean()
# We assume that both displacement fields have the same
# grid2world transform, which implies premult_index=Identity
# and premult_disp is the world2grid transform associated with
# the displacements' grid
self.compose(current_displacement, new_displacement, None,
disp_world2grid, time_scaling, current_displacement)
return np.array(current_displacement), np.array(mean_norm)
def get_map(self):
"""Returns the resulting diffeomorphic map
Returns the DiffeomorphicMap registering the moving image towards
the static image.
"""
return self.static_to_ref
def _connect_functions(self):
"""Assign the methods to be called according to the image dimension
Assigns the appropriate functions to be called for displacement field
inversion, Gaussian pyramid, and affine / dense deformation composition
according to the dimension of the input images e.g. 2D or 3D.
"""
if self.dim == 2:
self.invert_vector_field = vfu.invert_vector_field_fixed_point_2d
self.compose = vfu.compose_vector_fields_2d
else:
self.invert_vector_field = vfu.invert_vector_field_fixed_point_3d
self.compose = vfu.compose_vector_fields_3d
def _init_optimizer(self, static, moving,
static_grid2world, moving_grid2world, prealign):
"""Initializes the registration optimizer
Initializes the optimizer by computing the scale space of the input
images and allocating the required memory for the transformation models
at the coarsest scale.
Parameters
----------
static : array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization. The
displacement fields will have the same discretization as the static
image.
moving : array, shape (S, R, C) or (R, C)
the image to be used as "moving" during optimization. Since the
deformation fields' discretization is the same as the static image,
it is necessary to pre-align the moving image to ensure its domain
lies inside the domain of the deformation fields. This is assumed
to be accomplished by "pre-aligning" the moving image towards the
static using an affine transformation given by the 'prealign'
matrix
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the static image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the moving image
prealign : array, shape (dim+1, dim+1)
the affine transformation (operating on the physical space)
pre-aligning the moving image towards the static
"""
self._connect_functions()
# Extract information from affine matrices to create the scale space
static_direction, static_spacing = \
get_direction_and_spacings(static_grid2world, self.dim)
moving_direction, moving_spacing = \
get_direction_and_spacings(moving_grid2world, self.dim)
# the images' directions don't change with scale
self.static_direction = np.eye(self.dim + 1)
self.moving_direction = np.eye(self.dim + 1)
self.static_direction[:self.dim, :self.dim] = static_direction
self.moving_direction[:self.dim, :self.dim] = moving_direction
# Build the scale space of the input images
if self.verbosity >= VerbosityLevels.DIAGNOSE:
logger.info('Applying zero mask: ' + str(self.mask0))
if self.verbosity >= VerbosityLevels.STATUS:
logger.info('Creating scale space from the moving image.' +
' Levels: %d. Sigma factor: %f.' %
(self.levels, self.ss_sigma_factor))
self.moving_ss = ScaleSpace(moving, self.levels, moving_grid2world,
moving_spacing, self.ss_sigma_factor,
self.mask0)
if self.verbosity >= VerbosityLevels.STATUS:
logger.info('Creating scale space from the static image.' +
' Levels: %d. Sigma factor: %f.' %
(self.levels, self.ss_sigma_factor))
self.static_ss = ScaleSpace(static, self.levels, static_grid2world,
static_spacing, self.ss_sigma_factor,
self.mask0)
if self.verbosity >= VerbosityLevels.DEBUG:
logger.info('Moving scale space:')
for level in range(self.levels):
self.moving_ss.print_level(level)
logger.info('Static scale space:')
for level in range(self.levels):
self.static_ss.print_level(level)
# Get the properties of the coarsest level from the static image. These
# properties will be taken as the reference discretization.
disp_shape = self.static_ss.get_domain_shape(self.levels-1)
disp_grid2world = self.static_ss.get_affine(self.levels-1)
# The codomain discretization of both diffeomorphic maps is
# precisely the discretization of the static image
codomain_shape = static.shape
codomain_grid2world = static_grid2world
# The forward model transforms points from the static image
# to points on the reference (which is the static as well). So the
domain properties are taken from the static image. Since it's the same
# as the reference, we don't need to pre-align.
domain_shape = static.shape
domain_grid2world = static_grid2world
self.static_to_ref = DiffeomorphicMap(self.dim,
disp_shape,
disp_grid2world,
domain_shape,
domain_grid2world,
codomain_shape,
codomain_grid2world,
None)
self.static_to_ref.allocate()
# The backward model transforms points from the moving image
# to points on the reference (which is the static). So the input
# properties are taken from the moving image, and we need to pre-align
# points on the moving physical space to the reference physical space
# by applying the inverse of pre-align. This is done this way to make
# it clear for the user: the pre-align matrix is usually obtained by
# doing affine registration of the moving image towards the static
# image, which results in a matrix transforming points in the static
# physical space to points in the moving physical space
prealign_inv = None if prealign is None else npl.inv(prealign)
domain_shape = moving.shape
domain_grid2world = moving_grid2world
self.moving_to_ref = DiffeomorphicMap(self.dim,
disp_shape,
disp_grid2world,
domain_shape,
domain_grid2world,
codomain_shape,
codomain_grid2world,
prealign_inv)
self.moving_to_ref.allocate()
def _end_optimizer(self):
"""Frees the resources allocated during initialization
"""
del self.moving_ss
del self.static_ss
def _iterate(self):
"""Performs one symmetric iteration
Performs one iteration of the SyN algorithm:
1. Compute forward
2. Compute backward
3. Update forward
4. Update backward
5. Compute inverses
6. Invert the inverses
Returns
-------
der : float
the derivative of the energy profile, computed by fitting a
quadratic function to the energy values at the latest T iterations,
where T = self.energy_window. If the current iteration is less than
T then np.inf is returned instead.
"""
# Acquire current resolution information from scale spaces
current_moving = self.moving_ss.get_image(self.current_level)
current_static = self.static_ss.get_image(self.current_level)
current_disp_shape = \
self.static_ss.get_domain_shape(self.current_level)
current_disp_grid2world = \
self.static_ss.get_affine(self.current_level)
current_disp_world2grid = \
self.static_ss.get_affine_inv(self.current_level)
current_disp_spacing = \
self.static_ss.get_spacing(self.current_level)
# Warp the input images (smoothed to the current scale) to the common
# (reference) space at the current resolution
wstatic = self.static_to_ref.transform_inverse(current_static,
'linear',
None,
current_disp_shape,
current_disp_grid2world)
wmoving = self.moving_to_ref.transform_inverse(current_moving,
'linear',
None,
current_disp_shape,
current_disp_grid2world)
# Pass both images to the metric. Now both images are sampled on the
# reference grid (equal to the static image's grid) and the direction
# doesn't change across scales
self.metric.set_moving_image(wmoving, current_disp_grid2world,
current_disp_spacing,
self.static_direction)
self.metric.use_moving_image_dynamics(
current_moving, self.moving_to_ref.inverse())
self.metric.set_static_image(wstatic, current_disp_grid2world,
current_disp_spacing,
self.static_direction)
self.metric.use_static_image_dynamics(
current_static, self.static_to_ref.inverse())
# Initialize the metric for a new iteration
self.metric.initialize_iteration()
if self.callback is not None:
self.callback(self, RegistrationStages.ITER_START)
# Compute the forward step (to be used to update the forward transform)
fw_step = np.array(self.metric.compute_forward())
# set zero displacements at the boundary
fw_step = self.__set_no_boundary_displacement(fw_step)
# Normalize the forward step
nrm = np.sqrt(np.sum((fw_step/current_disp_spacing)**2, -1)).max()
if nrm > 0:
fw_step /= nrm
# Add to current total field
self.static_to_ref.forward, md_forward = self.update(
self.static_to_ref.forward, fw_step,
current_disp_world2grid, self.step_length)
del fw_step
# Keep track of the forward energy
fw_energy = self.metric.get_energy()
# Compose backward step (to be used to update the backward transform)
bw_step = np.array(self.metric.compute_backward())
# set zero displacements at the boundary
bw_step = self.__set_no_boundary_displacement(bw_step)
# Normalize the backward step
nrm = np.sqrt(np.sum((bw_step/current_disp_spacing) ** 2, -1)).max()
if nrm > 0:
bw_step /= nrm
# Add to current total field
self.moving_to_ref.forward, md_backward = self.update(
self.moving_to_ref.forward, bw_step,
current_disp_world2grid, self.step_length)
del bw_step
# Keep track of the energy
bw_energy = self.metric.get_energy()
der = np.inf
n_iter = len(self.energy_list)
if len(self.energy_list) >= self.energy_window:
der = self._get_energy_derivative()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
ch = '-' if np.isnan(der) else der
logger.info('%d:\t%0.6f\t%0.6f\t%0.6f\t%s' %
(n_iter, fw_energy, bw_energy,
fw_energy + bw_energy, ch))
self.energy_list.append(fw_energy + bw_energy)
self.__invert_models(current_disp_world2grid, current_disp_spacing)
# Free resources no longer needed to compute the forward and backward
# steps
if self.callback is not None:
self.callback(self, RegistrationStages.ITER_END)
self.metric.free_iteration()
return der
def __set_no_boundary_displacement(self, step):
""" set zero displacements at the boundary
Parameters
----------
step : array, ndim 2 or 3
displacements field
Returns
-------
step : array, ndim 2 or 3
displacements field
"""
step[0, ...] = 0
step[:, 0, ...] = 0
step[-1, ...] = 0
step[:, -1, ...] = 0
if self.dim == 3:
step[:, :, 0, ...] = 0
step[:, :, -1, ...] = 0
return step
def __invert_models(self, current_disp_world2grid, current_disp_spacing):
"""Converting static - moving models in both direction.
Parameters
----------
current_disp_world2grid : array, shape (3, 3) or (4, 4)
the space-to-grid transformation associated with the displacement
field (transforming physical space coordinates to voxel coordinates
of the displacement field grid)
current_disp_spacing : array, shape (2,) or (3,)
the spacing between voxels (voxel size along each axis)
"""
# Invert the forward model's forward field
self.static_to_ref.backward = np.array(
self.invert_vector_field(self.static_to_ref.forward,
current_disp_world2grid,
current_disp_spacing,
self.inv_iter, self.inv_tol,
self.static_to_ref.backward))
# Invert the backward model's forward field
self.moving_to_ref.backward = np.array(
self.invert_vector_field(self.moving_to_ref.forward,
current_disp_world2grid,
current_disp_spacing,
self.inv_iter, self.inv_tol,
self.moving_to_ref.backward))
# Invert the forward model's backward field
self.static_to_ref.forward = np.array(
self.invert_vector_field(self.static_to_ref.backward,
current_disp_world2grid,
current_disp_spacing,
self.inv_iter, self.inv_tol,
self.static_to_ref.forward))
# Invert the backward model's backward field
self.moving_to_ref.forward = np.array(
self.invert_vector_field(self.moving_to_ref.backward,
current_disp_world2grid,
current_disp_spacing,
self.inv_iter, self.inv_tol,
self.moving_to_ref.forward))
def _approximate_derivative_direct(self, x, y):
"""Derivative of the degree-2 polynomial fit of the given x, y pairs
Directly computes the derivative of the least-squares-fit quadratic
function estimated from (x[...],y[...]) pairs.
Parameters
----------
x : array, shape (n,)
increasing array representing the x-coordinates of the points to
be fit
y : array, shape (n,)
array representing the y-coordinates of the points to be fit
Returns
-------
y0 : float
the estimated derivative at x0 = 0.5*len(x)
"""
x = np.asarray(x)
y = np.asarray(y)
X = np.row_stack((x**2, x, np.ones_like(x)))
XX = (X).dot(X.T)
b = X.dot(y)
beta = npl.solve(XX, b)
x0 = 0.5 * len(x)
y0 = 2.0 * beta[0] * (x0) + beta[1]
return y0
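# Worked check of the quadratic fit above: for y = x**2 sampled at
# x = 0, 1, 2, 3, 4 the least-squares fit recovers beta = (1, 0, 0),
# so the estimate at x0 = 0.5 * len(x) = 2.5 is
# 2.0 * 1 * 2.5 + 0 = 5.0, matching the exact derivative dy/dx = 2x
# at x = 2.5.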
def _get_energy_derivative(self):
"""Approximate derivative of the energy profile
Returns the derivative of the estimated energy as a function of "time"
(iterations) at the last iteration
"""
n_iter = len(self.energy_list)
if n_iter < self.energy_window:
raise ValueError('Not enough data to fit the energy profile')
x = range(self.energy_window)
y = self.energy_list[(n_iter - self.energy_window):n_iter]
ss = sum(y)
if not ss == 0: # avoid division by zero
ss = - ss if ss > 0 else ss
y = [v / ss for v in y]
der = self._approximate_derivative_direct(x, y)
return der
def _optimize(self):
"""Starts the optimization
The main multi-scale symmetric optimization algorithm
"""
self.full_energy_profile = []
if self.callback is not None:
self.callback(self, RegistrationStages.OPT_START)
for level in range(self.levels - 1, -1, -1):
if self.verbosity >= VerbosityLevels.STATUS:
logger.info('Optimizing level %d' % level)
self.current_level = level
self.metric.set_levels_below(self.levels - level)
self.metric.set_levels_above(level)
if level < self.levels - 1:
expand_factors = \
self.static_ss.get_expand_factors(level+1, level)
new_shape = self.static_ss.get_domain_shape(level)
self.static_to_ref.expand_fields(expand_factors, new_shape)
self.moving_to_ref.expand_fields(expand_factors, new_shape)
self.niter = 0
self.energy_list = []
derivative = np.inf
if self.callback is not None:
self.callback(self, RegistrationStages.SCALE_START)
while ((self.niter < self.level_iters[self.levels - 1 - level]) and
(self.opt_tol < derivative)):
derivative = self._iterate()
self.niter += 1
self.full_energy_profile.extend(self.energy_list)
if self.callback is not None:
self.callback(self, RegistrationStages.SCALE_END)
# Reporting mean and std in stats[1] and stats[2]
residual, stats = self.static_to_ref.compute_inversion_error()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
logger.info('Static-Reference Residual error: %0.6f (%0.6f)'
% (stats[1], stats[2]))
residual, stats = self.moving_to_ref.compute_inversion_error()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
logger.info('Moving-Reference Residual error: %0.6f (%0.6f)'
% (stats[1], stats[2]))
# Compose the two partial transformations
self.static_to_ref = self.moving_to_ref.warp_endomorphism(
self.static_to_ref.inverse()).inverse()
# Report mean and std for the composed deformation field
residual, stats = self.static_to_ref.compute_inversion_error()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
logger.info('Final residual error: %0.6f (%0.6f)' % (stats[1],
stats[2]))
if self.callback is not None:
self.callback(self, RegistrationStages.OPT_END)
def optimize(self, static, moving, static_grid2world=None,
moving_grid2world=None, prealign=None):
"""
Starts the optimization
Parameters
----------
static : array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization. The
displacement fields will have the same discretization as the static
image.
moving : array, shape (S, R, C) or (R, C)
the image to be used as "moving" during optimization. Since the
deformation fields' discretization is the same as the static image,
it is necessary to pre-align the moving image to ensure its domain
lies inside the domain of the deformation fields. This is assumed
to be accomplished by "pre-aligning" the moving image towards the
static using an affine transformation given by the 'prealign'
matrix
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the static image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the moving image
prealign : array, shape (dim+1, dim+1)
the affine transformation (operating on the physical space)
pre-aligning the moving image towards the static
Returns
-------
static_to_ref : DiffeomorphicMap object
the diffeomorphic map that brings the moving image towards the
static one in the forward direction (i.e. by calling
static_to_ref.transform) and the static image towards the
moving one in the backward direction (i.e. by calling
static_to_ref.transform_inverse).
"""
if self.verbosity >= VerbosityLevels.DEBUG:
if prealign is not None:
logger.info("Pre-align: " + str(prealign))
self._init_optimizer(static.astype(floating), moving.astype(floating),
static_grid2world, moving_grid2world, prealign)
self._optimize()
self._end_optimizer()
self.static_to_ref.forward = np.array(self.static_to_ref.forward)
self.static_to_ref.backward = np.array(self.static_to_ref.backward)
return self.static_to_ref
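# A minimal end-to-end sketch of driving the optimizer above. It assumes
# dipy's CCMetric is available; `static` and `moving` are hypothetical 3D
# numpy arrays supplied by the caller.
def _example_syn_registration(static, moving):
    from dipy.align.metrics import CCMetric
    metric = CCMetric(3)  # cross-correlation similarity metric in 3D
    sdr = SymmetricDiffeomorphicRegistration(metric,
                                             level_iters=[100, 100, 25])
    mapping = sdr.optimize(static, moving)  # returns a DiffeomorphicMap
    return mapping.transform(moving)  # moving image warped towards static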
|
FrancoisRheaultUS/dipy
|
dipy/align/imwarp.py
|
Python
|
bsd-3-clause
| 63,918
|
[
"Gaussian"
] |
23a5b85673f761ee1db26db4b89c6f54e364af8858ad1c490b89f85b35d5b0e7
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('update_permissions')
@click.argument("dataset_id", type=str)
@click.option(
"--access_ids",
help="role IDs which should have access permissions for the dataset.",
type=str,
multiple=True
)
@click.option(
"--manage_ids",
help="role IDs which should have manage permissions for the dataset.",
type=str,
multiple=True
)
@click.option(
"--modify_ids",
help="role IDs which should have modify permissions for the dataset.",
type=str,
multiple=True
)
@pass_context
@custom_exception
@json_output
def cli(ctx, dataset_id, access_ids="", manage_ids="", modify_ids=""):
"""Set access, manage or modify permissions for a dataset to a list of roles.
Output:
Current roles for all available permission types.
.. note::
This method can only be used with Galaxy ``release_19.05`` or later.
"""
return ctx.gi.datasets.update_permissions(dataset_id, access_ids=access_ids, manage_ids=manage_ids, modify_ids=modify_ids)
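# A hedged invocation sketch; the command path follows this module's
# location (parsec/commands/datasets/update_permissions.py), but the exact
# CLI wiring is an assumption:
#
#     parsec datasets update_permissions <dataset_id> \
#         --access_ids <role_id> --manage_ids <role_id>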
|
galaxy-iuc/parsec
|
parsec/commands/datasets/update_permissions.py
|
Python
|
apache-2.0
| 1,127
|
[
"Galaxy"
] |
f7619a8383503c4871b11ac90716a46a920b8648bffa64b50df4d34bf865db3c
|
# -*- coding: utf-8 -*-
"""Building and simulating spiking neural networks using Brian2.
@author: rbodo
"""
import warnings
import numpy as np
import os
from tensorflow.keras.models import load_model
from snntoolbox.parsing.utils import get_type
from snntoolbox.simulation.utils import AbstractSNN, get_shape_from_label, \
build_convolution, build_pooling, get_ann_ops
from snntoolbox.utils.utils import confirm_overwrite
class SNN(AbstractSNN):
"""
Represents the compiled spiking neural network, ready for testing in a
spiking simulator.
Attributes
----------
layers: list[brian2.NeuronGroup]
Each entry represents a layer, i.e. a population of neurons, in form of
Brian2 ``NeuronGroup`` objects.
connections: list[brian2.Synapses]
Brian2 ``Synapses`` objects representing the connections between
individual layers.
threshold: str
Defines spiking threshold.
v_reset: str
Defines reset potential.
eqs: str
Differential equation for membrane potential.
spikemonitors: list[brian2.SpikeMonitor]
Brian2 ``SpikeMonitor`` s for each layer that records spikes.
statemonitors: list[brian2.StateMonitor]
Brian2 ``StateMonitor`` s for each layer that records membrane
potential.
snn: brian2.Network
The spiking network.
"""
def __init__(self, config, queue=None):
AbstractSNN.__init__(self, config, queue)
self.layers = []
self.connections = []  # Synapses connecting consecutive layers.
self.threshold = 'v >= v_thresh'
if 'subtraction' in config.get('cell', 'reset'):
self.v_reset = 'v = v - v_thresh'
else:
self.v_reset = 'v = v_reset'
self.eqs = '''dv/dt = bias : 1
bias : hertz'''
self.spikemonitors = []
self.statemonitors = []
self.snn = None
self._input_layer = None
self._cell_params = None
# Track the output layer spikes.
self.output_spikemonitor = None
@property
def is_parallelizable(self):
return False
def add_input_layer(self, input_shape):
if self._poisson_input:
self.layers.append(self.sim.PoissonGroup(
np.prod(input_shape[1:]), rates=0*self.sim.Hz,
dt=self._dt*self.sim.ms))
else:
self.layers.append(self.sim.NeuronGroup(
np.prod(input_shape[1:]), model=self.eqs, method='euler',
reset=self.v_reset, threshold=self.threshold,
dt=self._dt * self.sim.ms))
self.layers[0].add_attribute('label')
self.layers[0].label = 'InputLayer'
self.spikemonitors.append(self.sim.SpikeMonitor(self.layers[0]))
# Need placeholders "None" for layers without states:
self.statemonitors.append(self.sim.StateMonitor(self.layers[0], [],
False))
def add_layer(self, layer):
# Latest Keras versions need special permutation after Flatten layers.
if 'Flatten' in layer.__class__.__name__ and \
self.config.get('input', 'model_lib') == 'keras':
self.flatten_shapes.append(
(layer.name, get_shape_from_label(self.layers[-1].label)))
return
self.layers.append(self.sim.NeuronGroup(
np.prod(layer.output_shape[1:]), model=self.eqs, method='euler',
reset=self.v_reset, threshold=self.threshold,
dt=self._dt * self.sim.ms))
self.connections.append(self.sim.Synapses(
self.layers[-2], self.layers[-1], 'w:1', on_pre='v+=w',
dt=self._dt * self.sim.ms))
self.layers[-1].add_attribute('label')
self.layers[-1].label = layer.name
if 'spiketrains' in self._plot_keys \
or 'spiketrains_n_b_l_t' in self._log_keys:
self.spikemonitors.append(self.sim.SpikeMonitor(self.layers[-1]))
if 'v_mem' in self._plot_keys or 'mem_n_b_l_t' in self._log_keys:
self.statemonitors.append(self.sim.StateMonitor(self.layers[-1],
'v', True))
def build_dense(self, layer, weights=None):
if layer.activation == 'softmax':
warnings.warn("Activation 'softmax' not implemented. Using "
"'relu' activation instead.", RuntimeWarning)
_weights, biases = layer.get_weights()
if weights is None:
weights = _weights
self.set_biases(biases)
delay = self.config.getfloat('cell', 'delay')
connections = []
if len(self.flatten_shapes) == 1:
print("Swapping data_format of Flatten layer.")
flatten_name, shape = self.flatten_shapes.pop()
if self.data_format == 'channels_last':
y_in, x_in, f_in = shape
else:
f_in, y_in, x_in = shape
for i in range(weights.shape[0]): # Input neurons
# Sweep across channel axis of feature map. Assumes that each
# consecutive input neuron lies in a different channel. This is
# the case for channels_last, but not for channels_first.
f = i % f_in
# Sweep across height of feature map. Increase y by one if all
# rows along the channel axis were seen.
y = i // (f_in * x_in)
# Sweep across width of feature map.
x = (i // f_in) % x_in
new_i = f * x_in * y_in + x_in * y + x
for j in range(weights.shape[1]): # Output neurons
connections.append((new_i, j, weights[i, j], delay))
elif len(self.flatten_shapes) > 1:
raise RuntimeWarning("Not all Flatten layers have been consumed.")
else:
for i in range(weights.shape[0]):
for j in range(weights.shape[1]):
connections.append((i, j, weights[i, j], delay))
connections = np.array(connections)
self.connections[-1].connect(i=connections[:, 0].astype('int64'),
j=connections[:, 1].astype('int64'))
self.connections[-1].w = connections[:, 2]
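# Worked example of the channels_last -> channels_first remap above
# (values chosen for illustration): with (y_in, x_in, f_in) = (2, 2, 3),
# input neuron i = 7 gives f = 7 % 3 = 1, y = 7 // 6 = 1,
# x = (7 // 3) % 2 = 0, hence new_i = 1*2*2 + 2*1 + 0 = 6, i.e.
# channel 1, row 1, column 0 in channels_first ordering.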
def build_convolution(self, layer, weights=None):
delay = self.config.getfloat('cell', 'delay')
transpose_kernel = \
self.config.get('simulation', 'keras_backend') == 'tensorflow'
conns, biases = build_convolution(layer, delay, transpose_kernel)
connections = np.array(conns)
self.set_biases(biases)
print("Connecting layer...")
self.connections[-1].connect(i=connections[:, 0].astype('int64'),
j=connections[:, 1].astype('int64'))
w = connections[:, 2] if weights is None else weights.flatten()
self.connections[-1].w = w
def build_pooling(self, layer, weights=None):
delay = self.config.getfloat('cell', 'delay')
connections = np.array(build_pooling(layer, delay))
self.connections[-1].connect(i=connections[:, 0].astype('int64'),
j=connections[:, 1].astype('int64'))
w = connections[:, 2] if weights is None else weights.flatten()
self.connections[-1].w = w
def compile(self):
self.output_spikemonitor = self.sim.SpikeMonitor(self.layers[-1])
spikemonitors = self.spikemonitors + [self.output_spikemonitor]
self.snn = self.sim.Network(self.layers, self.connections,
spikemonitors, self.statemonitors)
self.snn.store()
# Set input layer
for obj in self.snn.objects:
if hasattr(obj, 'label') and obj.label == 'InputLayer':
self._input_layer = obj
assert self._input_layer, "No input layer found."
def simulate(self, **kwargs):
inputs = kwargs[str('x_b_l')].flatten() / self.sim.ms
if self._poisson_input:
self._input_layer.rates = inputs / self.rescale_fac
elif self._is_aedat_input:
# TODO: Implement by using brian2.SpikeGeneratorGroup.
raise NotImplementedError
else:
self._input_layer.bias = inputs
self.snn.run(self._duration * self.sim.ms, namespace=self._cell_params,
report='stdout', report_period=10 * self.sim.ms)
output_b_l_t = self.get_recorded_vars(self.layers)
return output_b_l_t
def reset(self, sample_idx):
mod = self.config.getint('simulation', 'reset_between_nth_sample')
mod = mod if mod else sample_idx + 1
if sample_idx % mod == 0:
print("Resetting simulator...")
self.snn.restore()
def end_sim(self):
pass
def save(self, path, filename):
print("Saving weights ...")
for i, connection in enumerate(self.connections):
filepath = os.path.join(path,
self.config.get('paths', 'filename_snn'),
'brian2-model',
self.layers[i + 1].label + '.npz')
if self.config.getboolean('output', 'overwrite') \
or confirm_overwrite(filepath):
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
print("Store weights of layer {} to file {}".format(
self.layers[i + 1].label, filepath))
np.savez(filepath, self.connections[i].w)
def load(self, path, filename):
dirpath = os.path.join(path, filename, 'brian2-model')
npz_files = [f for f in sorted(os.listdir(dirpath))
if os.path.isfile(os.path.join(dirpath, f))]
print("Loading spiking model...")
self.parsed_model = load_model(
os.path.join(self.config.get('paths', 'path_wd'),
self.config.get('paths',
'filename_parsed_model') + '.h5'))
self.num_classes = int(self.parsed_model.layers[-1].output_shape[-1])
self.top_k = min(self.num_classes, self.config.getint('simulation',
'top_k'))
# Get batch input shape
batch_shape = list(self.parsed_model.layers[0].batch_input_shape)
batch_shape[0] = self.batch_size
if self.config.get('conversion', 'spike_code') == 'ttfs_dyn_thresh':
batch_shape[0] *= 2
self.add_input_layer(batch_shape)
# Iterate over layers to create spiking neurons and connections.
for layer, f in zip(self.parsed_model.layers[1:], npz_files):
print("Building layer: {}".format(layer.name))
self.add_layer(layer)
layer_type = get_type(layer)
filepath = os.path.join(dirpath, f)
print("Using layer-weights stored in: {}".format(filepath))
print("Loading stored weights...")
input_file = np.load(filepath)
weights = input_file['arr_0']
if layer_type == 'Dense':
self.build_dense(layer, weights=weights)
elif layer_type == 'Conv2D':
self.build_convolution(layer, weights=weights)
if layer.data_format == 'channels_last':
self.data_format = layer.data_format
elif layer_type in {'MaxPooling2D', 'AveragePooling2D'}:
self.build_pooling(layer, weights=weights)
elif layer_type == 'Flatten':
self.build_flatten(layer)
print("Compiling spiking model...\n")
self.compile()
# Compute number of operations of ANN.
if self.fanout is None:
self.set_connectivity()
self.operations_ann = get_ann_ops(self.num_neurons,
self.num_neurons_with_bias,
self.fanin)
print("Number of operations of ANN: {}".format(
self.operations_ann))
print("Number of neurons: {}".format(sum(self.num_neurons[1:])))
print("Number of synapses: {}\n".format(self.num_synapses))
self.is_built = True
def init_cells(self):
self._cell_params = {
'v_thresh': self.config.getfloat('cell', 'v_thresh'),
'v_reset': self.config.getfloat('cell', 'v_reset'),
'tau_m': self.config.getfloat('cell', 'tau_m') * self.sim.ms}
def get_spiketrains(self, **kwargs):
j = self._spiketrains_container_counter
if self.spiketrains_n_b_l_t is None or \
j >= len(self.spiketrains_n_b_l_t):
return None
shape = self.spiketrains_n_b_l_t[j][0].shape
# Outer for-loop that calls this function starts with
# 'monitor_index' = 0, but this is reserved for the input and handled
# by `get_spiketrains_input()`.
i = len(self.spikemonitors) - 1 if kwargs[str('monitor_index')] == -1 \
else kwargs[str('monitor_index')] + 1
spiketrain_dict = self.spikemonitors[i].spike_trains()
spiketrains_flat = np.array([spiketrain_dict[key] / self.sim.ms for key
in spiketrain_dict.keys()])
spiketrains_b_l_t = \
self.reshape_flattened_spiketrains(spiketrains_flat, shape)
return spiketrains_b_l_t
def get_spiketrains_input(self):
shape = list(self.parsed_model.input_shape) + [self._num_timesteps]
spiketrain_dict = self.spikemonitors[0].spike_trains()
spiketrains_flat = np.array([spiketrain_dict[key] / self.sim.ms for key
in spiketrain_dict.keys()])
spiketrains_b_l_t = \
self.reshape_flattened_spiketrains(spiketrains_flat, shape)
return spiketrains_b_l_t
def get_spiketrains_output(self):
shape = [self.batch_size, self.num_classes, self._num_timesteps]
spiketrain_dict = self.output_spikemonitor.spike_trains()
spiketrains_flat = np.array([spiketrain_dict[key] / self.sim.ms for key
in spiketrain_dict.keys()])
spiketrains_b_l_t = \
self.reshape_flattened_spiketrains(spiketrains_flat, shape)
return spiketrains_b_l_t
def get_vmem(self, **kwargs):
j = kwargs[str('monitor_index')]
if j >= len(self.statemonitors):
return None
try:
return np.array([
np.array(v).transpose() for v in self.statemonitors[j].v])
except AttributeError:
return None
def set_spiketrain_stats_input(self):
AbstractSNN.set_spiketrain_stats_input(self)
def set_biases(self, biases):
"""Set biases."""
if any(biases):
assert self.layers[-1].bias.shape == biases.shape, \
"Shape of biases and network do not match."
self.layers[-1].bias = biases / self.sim.ms
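# A hedged configuration sketch: this simulator is normally driven by the
# snntoolbox pipeline from a config file rather than instantiated directly.
# The section and key names mirror those read in this module ('simulation',
# 'cell', 'paths'); the values are illustrative only.
#
#     [simulation]
#     simulator = brian2
#     duration = 100
#
#     [cell]
#     v_thresh = 1
#     v_reset = 0
#     reset = subtraction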
|
NeuromorphicProcessorProject/snn_toolbox
|
snntoolbox/simulation/target_simulators/brian2_target_sim.py
|
Python
|
mit
| 15,271
|
[
"NEURON"
] |
2a13efb9375d4c015fa63e429803f47f9226ceb64b6e9e76c0aa90163f25e444
|
from model import User, Location, Visit
import pylibmc
mc = pylibmc.Client(["127.0.0.1"], binary=True,
behaviors={"tcp_nodelay": True})
class RepositoryBase(object):
def add_item(self, data):
key, item = self.create_item(data)
mc.set(key, item)
return item
def add_multi(self, data):
items = {}
for x in data:
k,v = self.create_item(x)
items[k] = v
mc.set_multi(items)
def get_multi(self, data):
count = 0
items = []
result = {}
for i in data:
count += 1
items.append(i)
if count == 1000:
result.update(mc.get_multi([self.get_key(x) for x in items]))
items = []  # start a fresh batch after flushing 1000 keys
count = 0
result.update(mc.get_multi([self.get_key(x) for x in items]))
return result
def update_multi(self, data):
mc.set_multi(data)
def update_item(self, new_item):
key = self.get_key(new_item.id)
mc.set(key, new_item)
def update_item_from_dict(self, id, data):
key = self.get_key(id)
item = mc.get(key)
if not item:
raise KeyError()
item.update(data)
mc.set(key, item)
def get_item(self, id):
key = self.get_key(id)
return mc.get(key)
class UserRepository(RepositoryBase):
def get_key(self, id):
return "user" + str(id)
def create_item(self, data):
user = data if isinstance(data, User) else User(data)
return self.get_key(user.id), user
class LocationRepository(RepositoryBase):
def get_key(self, id):
return "location" + str(id)
def create_item(self, data):
location = data if isinstance(data, Location) else Location(data)
return self.get_key(location.id), location
class VisitRepository(RepositoryBase):
def get_key(self, id):
return "visit" + str(id)
def create_item(self, data):
visit = data if isinstance(data, Visit) else Visit(data)
return self.get_key(visit.id), visit
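# A minimal usage sketch; `user_data` is a hypothetical dict (or User)
# accepted by the model constructor. Keys are type-prefixed ids as built
# by get_key above.
def _example_usage(user_data):
    users = UserRepository()
    user = users.add_item(user_data)  # cached in memcached as "user<id>"
    again = users.get_item(user.id)  # round-trip by id
    many = users.get_multi([1, 2, 3])  # fetched in chunks of 1000 keys
    return again, many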
|
KonbOgonb/hlc_r1
|
app/repository.py
|
Python
|
mit
| 2,067
|
[
"VisIt"
] |
66913d57583e594a201878119844358e24d426f074feb97ac7bd82f442bfbdbd
|
#!/usr/bin/env python
"""Deainterleave FastQ. Can handle paired and single end.
This can for example be used to properly convert a PE BAM back to FastQ:
htscmd bamshuf -uOn 128 $bam tmp | htscmd bam2fq -a - | deinterleave_fastq.py - $outprefix
"""
__author__ = "Andreas Wilm"
__email__ = "wilma@gis.a-star.edu.sg"
__copyright__ = "2014 Genome Institute of Singapore"
__license__ = "GPL2"
#--- standard library imports
#
import sys
import logging
import os
import gzip
from collections import namedtuple
#--- third-party imports
#
#/
#--- project specific imports
#
# /
FastqSeq = namedtuple('FastqSeq', ['id', 'sq', 'bq'])
#global logger
# http://docs.python.org/library/logging.html
LOG = logging.getLogger("")
logging.basicConfig(level=logging.WARN,
format='%(levelname)s [%(asctime)s]: %(message)s')
def get_baseid(seqid):
"""return base of sequence id, i.e. without pair info.
would also work if we used biopython ids, because for newer
fastq (illumina>1.8+) the pair info is in the description not the
id/name so there it's safe to return id as it is
"""
if seqid[-2] == "/":
#print "returning %s for %s" % (seqid[:-2], seqid)
return seqid[:-2]
else:
#print "returning %s" % seqid
return seqid
def parse_fastq(fh):
"""iterator for reading fastq file. keep it simple and make no
interpretation of data, e.g. base qualities (exception is id2
which is not saved). this way we save some time and each entry can
almost be printed as is"""
while True:
# check for eof which should only happen after complete entry,
# i.e before new id. "readline(): empty string is returned
# only when EOF is encountered immediately."
sid = fh.readline()
if len(sid)==0:
break
assert sid[0] == '@', (
"Expected id starting with @, but got '%s'" % sid)
sid = sid.rstrip()[1:]
sq = fh.readline().rstrip()
tmp = fh.readline().rstrip()
assert tmp[0] == "+", (
"Expected second id line starting with +, but got '%s'" % tmp)
bq = fh.readline().rstrip()
assert len(sq) == len(bq), (
"Length mismatch between bases (%d) and base qualities (%d)" % (len(sq), len(bq)))
yield FastqSeq(sid, sq, bq)
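# A minimal sketch of consuming the parser; 'reads.fq' is a placeholder
# path:
#
#     with open('reads.fq') as fh:
#         for rec in parse_fastq(fh):
#             LOG.info("%s: %d bases" % (rec.id, len(rec.sq)))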
def write_fastq(fh, fastqseq):
fh.write("@%s\n%s\n+\n%s\n" % (fastqseq.id, fastqseq.sq, fastqseq.bq))
def main():
"""main function
"""
# poor man's argparse
try:
fq_in = sys.argv[1]
fq_outbase = sys.argv[2]
except IndexError:
sys.stderr.write("Usage: %s in.fq[.gz] out_fq_prefix\n" % sys.argv[0])
sys.exit(1)
LOG.setLevel(logging.INFO)
if fq_in == "-":
fh_in = sys.stdin
else:
assert os.path.exists(fq_in)
if fq_in[-3:] == ".gz":
fh_in = gzip.open(fq_in)
else:
fh_in = open(fq_in)
fq_out_pe1 = fq_outbase + "_1.fastq.gz"
fq_out_pe2 = fq_outbase + "_2.fastq.gz"
fq_out_sr = fq_outbase + "_0.fastq.gz"
for f in [fq_out_pe1, fq_out_pe2, fq_out_sr]:
if os.path.exists(f):
LOG.fatal("Cowardly refusing to overwrite already existing file %s" % f)
sys.exit(1)
LOG.info("Writing PE reads to %s and %s and single end reads to %s" % (
fq_out_pe1, fq_out_pe2, fq_out_sr))
fh_out_pe1 = gzip.open(fq_out_pe1, 'w')
fh_out_pe2 = gzip.open(fq_out_pe2, 'w')
fh_out_sr = gzip.open(fq_out_sr, 'w')
prevseq = None
curseq = None
num_in = num_sr = num_pe = 0
for curseq in parse_fastq(fh_in):
num_in += 1
if prevseq:
# name match indicates pair, otherwise the previously read seq was single
if get_baseid(prevseq.id)==get_baseid(curseq.id):
write_fastq(fh_out_pe1, prevseq)
write_fastq(fh_out_pe2, curseq)
prevseq = None
num_pe += 2
else:
write_fastq(fh_out_sr, prevseq)
num_sr += 1
prevseq = curseq
else:
prevseq = curseq
# if there's still one left, then it must have been sr
if prevseq:
write_fastq(fh_out_sr, prevseq)
num_sr += 1
for fh in [fh_out_pe1, fh_out_pe2, fh_out_sr, fh_in]:
fh.close()
LOG.info('Written %d PE reads and %d SR reads (total was %d)' % (
num_pe, num_sr, num_in))
if __name__ == "__main__":
main()
LOG.info("Successful program exit")
|
CSB5/misc-scripts
|
deinterleave_fastq.py
|
Python
|
gpl-2.0
| 4,554
|
[
"Biopython"
] |
edd4a1d491db030685e47ee56f74823bb51d352b386ba2858d7ff1082dc400dd
|
# This file is taken from the server repo (dropbox/python_linters/main.py) and
# changed to output in a format that arc understands (--arc-out option).
from __future__ import absolute_import
"Used to run the python AST linters on a set of files. Normally run through bin/lint"
import ast
import argparse
import json
import multiprocessing
import os
import random as insecure_random
import re
import textwrap
import sys
from linters.analysis import AnalysisVisitor, prettyprint_ast
from linters.linters import all_linters
server_root = os.path.dirname(__file__) + '/..'
def check_test_case(filename):
print "Checking", filename
with open(filename) as f:
parsed = ast.parse(f.read(), filename)
expected_stmt = parsed.body[-1]
no_expected_msg = "Can't find `expected =` block at end of test file"
assert type(expected_stmt) == ast.Assign, no_expected_msg
assert len(expected_stmt.targets) == 1, no_expected_msg
assert expected_stmt.targets[0].id == "expected", no_expected_msg
expected = ast.literal_eval(expected_stmt.value)
visitor = AnalysisVisitor(all_linters, filename)
for stmt in parsed.body[:-1]:
visitor.visit(stmt)
assert visitor.items == expected, json.dumps({
"filename": filename,
"expected": expected,
"actual": visitor.items
}, indent=4)
def check_all():
for (dirpath, dirname, filenames) in os.walk(os.path.dirname(__file__) + "/test/"):
for filename in filenames:
check_test_case(dirpath + "/" + filename)
def run(enforce_paths, linters, json_out, arc_out, run_all):
output = []
# Walk all folders in `enforce_paths`, but make sure we de-duplicate things
# in case someone passes in the same file twice or both a folder and a single
# file inside it.
seen = set()
items = []
for enforce_path in enforce_paths:
if not os.path.isdir(enforce_path):
path = os.path.relpath(
os.path.normpath(enforce_path),
server_root
)
items.append(path)
seen.add(path)
else:
for (dirpath, dirnames, filenames) in os.walk(enforce_path):
for filename in filenames:
if filename.endswith(".py"):
path = os.path.relpath(
os.path.normpath(dirpath + "/" + filename),
server_root
)
if path not in seen:
seen.add(path)
items.append(path)
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
if not json_out:
count_msg = len(linters) if run_all else "enforced"
print "Running", count_msg, "linters on", len(items), "files"
pool = multiprocessing.Pool()
results = pool.map(
run_on_file,
[(item, linters, run_all, json_out) for item in items]
)
for res in results:
if res:
item_output, item_lines = res
output += item_output
pool.close()
if json_out:
print json.dumps(output, indent=4)
elif arc_out:
print arc_output(output)
else:
print prettyprint_output(output)
def run_on_file(args):
filepath, linters, run_all, json_out = args
enforced_linters = [
linter
for linter in linters
if linter.enforce_pattern is not None and re.match(linter.enforce_pattern, filepath) or run_all
]
res = visit_path(enforced_linters, filepath)
# Print a rough "progress" bar approx once every 177 files so people can
# See the linter making progress when running on a large set of files
if not json_out and insecure_random.random() > 0.99:
sys.stdout.write(".")
sys.stdout.flush()
return res
def visit_path(linters, filepath):
output = []
visitor = AnalysisVisitor(linters, filepath)
try:
with open(filepath) as f:
txt = f.read()
parsed = ast.parse(txt, filepath)
visitor.visit(parsed)
for item in visitor.items:
item["filepath"] = filepath
output.append(item)
return output, len(txt.split("\n"))
except IOError:
# This happens when you can't read a file because it's a
# broken symlink
pass
def prettyprint_output(lint_output):
linters_to_warnings = {}
for lint_warning in lint_output:
if lint_warning['linter_name'] not in linters_to_warnings:
linters_to_warnings[lint_warning['linter_name']] = []
linters_to_warnings[lint_warning['linter_name']].append(lint_warning)
output = []
for linter_name, warnings in linters_to_warnings.items():
output += [warnings[0]['linter_name']]
output += ["=" * len(warnings[0]['linter_name'])]
output += textwrap.wrap(warnings[0]['desc'])
output += [""]
for warning in warnings:
output += [warning['filepath'] + ":" + str(warning['lineno'])]
output += [""]
return "\n".join(output)
def arc_output(lint_output):
lines = []
for lint_warning in lint_output:
lines.append('{severity}:{line_number} {message}'.format(
severity=lint_warning['severity'],
line_number=lint_warning['lineno'],
message=lint_warning['desc'],
))
return '\n'.join(lines)
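# Example of the arc-compatible output produced above (messages are
# illustrative; severity and description come from each lint warning):
#
#     warning:12 Avoid catching a bare Exception
#     error:40 Mutable default argument
#
# which matches the regex documented for --arc-out below.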
def main():
parser = argparse.ArgumentParser(description='Runs our Python linters on the server codebase')
parser.add_argument('TARGETS', nargs="*", help=
"Run all lint rules on a file or folder"
)
parser.add_argument('--test', metavar="TEST_FILES", nargs="?", help=
"Run the linter's internal unit tests in a single file or folder; "
"used when writing a linter and writing tests to ensure the linter "
"picks up the things you would expect."
)
parser.add_argument('--dump', metavar="TARGET", nargs="?", help=
"dumps the file to a nicely-formatted AST "
"for you to read. Useful when writing your linter to try and figure out "
"how to match a particular piece of code via it's AST"
)
parser.add_argument('--all', dest="all", action='store_const', const=True, help=
"Run all the linters, not just the ones that are breaking the build"
)
parser.add_argument('--linter', nargs="?", help=
"Specify a single linter to run, by name. e.g. `--linter accessibility` or "
"`--linter ip_logging`, rather than the all linters that are breaking the build."
)
parser.add_argument('--json-out', dest="json_out", action='store_const', const=True, help=
"Dump output to JSON instead of human-readable text"
)
parser.add_argument('--arc-out', dest='arc_out', action='store_const', const=True, help=
"Dump output in an arc compatible format, assuming the regex pattern arc wants"
" is /^(?P<severity>advice|warning|error):(?P<line>\\d+) (?P<message>.*)$/m."
"--json-out takes precedence over this.")
args = parser.parse_args()
if args.linter:
linters = [
linter for linter in all_linters
if linter.linter_func.__name__ == args.linter + "_linter"
]
run_all = True
else:
linters = all_linters
run_all = args.all
if args.test == "all":
check_all()
elif args.test:
check_test_case(args.test)
elif args.dump:
with open(args.dump) as f:
parsed = ast.parse(f.read(), args.dump)
print prettyprint_ast(parsed)
else:
run(
args.TARGETS or [server_root],
linters,
args.json_out,
args.arc_out,
run_all=run_all
)
if __name__ == "__main__":
main()
|
dropbox/changes
|
linters/main.py
|
Python
|
apache-2.0
| 7,862
|
[
"VisIt"
] |
a229df8ba27233b98744e7baf7c8be9675b3b9c836031550ef5d11af44dc0a5c
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to
# submit large numbers of jobs on supercomputers. It provides a python interface to physical input,
# such as crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential
# programs. It is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
# -*- coding: utf-8 -*-
from pytest import fixture, mark
from pylada.espresso import Namelist
@fixture
def recursive_namelist():
from collections import OrderedDict
return OrderedDict([
('control', OrderedDict([
('prefix', 'al'),
('outdir', 'temporary directory for large files'),
('pseudo_dir',
'directory where pp-files are kept')
])),
('system', OrderedDict([
('ibrav', 2),
('celldm', [7.5]),
('nat', 1),
('ntyp', 1),
('ecutwfc', 12.0),
('occupations', 'smearing'),
('smearing', 'marzari-vanderbilt'),
('degauss', 0.06)
])),
('electrons', OrderedDict())])
@fixture
def simple_namelist(recursive_namelist):
return recursive_namelist['system']
@fixture
def WithTraitLets():
from traitlets import Enum
class WithTraitLets(Namelist):
ibrav = Enum([0, 1, 2, 3, 4, 5, -5, 6, 7, 8, 9, -9, 10, 11, 12, -12, 13, 14], 0,
help="Bravais class", allow_none=True)
return WithTraitLets
@mark.parametrize('name, type_, value', [
('ibrav', int, 2),
('nat', int, 1),
('ntyp', int, 1),
('ecutwfc', float, 12.0),
('celldm', list, [7.5]),
('occupations', str, 'smearing'),
('smearing', str, 'marzari-vanderbilt'),
('degauss', float, 0.06)
])
def test_scalar_namelist_attributes(simple_namelist, name, type_, value):
from numpy import abs, allclose
from pylada.misc import Sequence
nl = Namelist(simple_namelist)
assert hasattr(nl, name)
assert isinstance(getattr(nl, name), type_)
if type_ == float:
assert abs(getattr(nl, name) - value) < 1e-12
elif type_ == list:
assert allclose(getattr(nl, name), value, 1e-12)
else:
assert getattr(nl, name) == value
def test_recursive_namelist_attributes(recursive_namelist):
nl = Namelist(recursive_namelist)
assert hasattr(nl, 'system')
assert isinstance(getattr(nl, 'system'), Namelist)
assert getattr(nl.system, 'ibrav', 0) == 2
assert len(nl) == 3
def test_length_includes_traits(WithTraitLets, simple_namelist):
nl = WithTraitLets(simple_namelist)
assert len(nl) == len(simple_namelist)
nl.ibrav = None
assert len(nl) == len(simple_namelist) - 1
def test_empty_namelists_do_appear(recursive_namelist):
nl = Namelist(recursive_namelist)
assert hasattr(nl, 'electrons')
assert isinstance(getattr(nl, 'electrons'), Namelist)
assert len(nl.electrons) == 0
def test_simple_back_to_ordered(simple_namelist):
nl = Namelist(simple_namelist)
assert len(nl) > 0
back = nl.namelist()
assert len(back) == len(simple_namelist)
for back_key, key in zip(back, simple_namelist):
assert back_key == key
assert back[key] == simple_namelist[key]
def test_recursive_back_to_ordered(recursive_namelist):
from collections import OrderedDict
nl = Namelist(recursive_namelist)
assert len(nl) > 0
back = nl.namelist()
assert len(back) == len(recursive_namelist)
for back_key, key in zip(back, recursive_namelist):
assert back_key == key
assert isinstance(back[key], OrderedDict)
def test_set_known_attributes(recursive_namelist):
nl = Namelist(recursive_namelist)
nl.system.ibrav = 2
assert nl.system.ibrav == 2
def test_add_namelist_attribute(recursive_namelist):
nl = Namelist(recursive_namelist)
nl.system.bravasi = 2
assert nl.system.bravasi == 2
assert 'bravasi' in nl.system.namelist()
def test_add_private_attribute(recursive_namelist):
nl = Namelist(recursive_namelist)
nl.system._bravasi = 2
assert nl.system._bravasi == 2
assert '_bravasi' not in nl.system.namelist()
def test_delete_namelist_attribute(recursive_namelist):
from pytest import raises
nl = Namelist(recursive_namelist)
del nl.system.ibrav
with raises(AttributeError):
nl.system.ibrav
assert 'ibrav' not in nl.system.namelist()
def test_delete_private_attribute(recursive_namelist):
from pytest import raises
nl = Namelist(recursive_namelist)
nl._private = 0
del nl._private
with raises(AttributeError):
nl._private
def test_deleting_uknown_attribute_fails(recursive_namelist):
from pytest import raises
nl = Namelist(recursive_namelist)
with raises(AttributeError):
del nl.system.ibravi
with raises(AttributeError):
del nl.system._ibravi
def test_traitlets_from_empty(WithTraitLets):
from pytest import raises
from traitlets import TraitError
nl = WithTraitLets()
assert nl.ibrav == 0
nl.ibrav = 2
assert nl.ibrav == 2
with raises(TraitError):
nl.ibrav = 15
def test_traitlets_appear_in_dict(WithTraitLets):
from pytest import raises
from traitlets import TraitError
nl = WithTraitLets()
assert 'ibrav' in nl.namelist()
assert 'ibrav' not in nl.__dict__['_Namelist__inputs']
def test_traitlets_from_filled(simple_namelist, WithTraitLets):
from pytest import raises
from traitlets import TraitError
nl = WithTraitLets(simple_namelist)
assert nl.ibrav == 2
nl.ibrav = 3
assert nl.ibrav == 3
assert 'ibrav' in nl.namelist()
with raises(TraitError):
nl.ibrav = 15
def test_traitlets_cannot_be_deleted(WithTraitLets):
from pytest import raises
from traitlets import TraitError
nl = WithTraitLets()
with raises(AttributeError):
del nl.ibrav
def test_none_arguments_do_not_appear_in_dict(simple_namelist):
nl = Namelist(simple_namelist)
nl.ibrav = None
assert 'ibrav' not in nl.namelist()
def test_none_traitelets_do_not_appear_in_dict(WithTraitLets):
from pytest import raises
from traitlets import TraitError
nl = WithTraitLets()
nl.ibrav = None
assert 'ibrav' not in nl.namelist()
def test_input_transform(WithTraitLets):
from pylada.espresso.namelists import input_transform
class Transformed(WithTraitLets):
@input_transform
def __transform_ibrav(self, dictionary, value):
dictionary['ibrav'] = value
nl = Transformed()
assert nl.namelist(value=5)['ibrav'] == 5
assert nl.namelist(value=6)['ibrav'] == 6
def test_write_read_loop(recursive_namelist):
nl = Namelist(recursive_namelist)
reread = Namelist()
reread.read_string(nl.write())
assert len(reread.electrons) == 0
assert reread.system.ibrav == 2
assert reread.system.occupations == 'smearing'
assert reread.control.prefix == 'al'
|
pylada/pylada-light
|
tests/espresso/test_namelists.py
|
Python
|
gpl-3.0
| 7,784
|
[
"CRYSTAL",
"ESPResSo",
"VASP"
] |
eb8aee0256f883dbc9d97d440a8eb1f06fe06db0e05f48514cc8c14960ba73c0
|
'''
CMEMS module
Contains functions related to local data handling.
Maren K. Karlsen 2020.10.29
'''
import logging
import ftputil
import os
import sys
import re
import hashlib
import datetime
import pandas as pd
import numpy as np
import netCDF4
from modules.CMEMS.Export_CMEMS_netCDF_builder import buildnetcdfs
from modules.Common.data_processing import get_file_from_zip, get_platform, construct_datafilename
from modules.CMEMS.Export_CMEMS_ftp import upload_to_ftp, evaluate_response_file
from modules.CMEMS.Export_CMEMS_sql import update_db_dnt
import xml.etree.ElementTree as ET
import sqlite3
import json
import time
dnt_datetime_format = '%Y-%m-%dT%H:%M:%SZ'
server_location = 'ftp://nrt.cmems-du.eu/Core'
PRODUCT_ID = 'INSITU_GLO_CARBON_NRT_OBSERVATIONS_013_049'
DATASET_ID = 'NRT_202003'
INSTITUTION = 'University of Bergen Geophysical Institute'
INSTITUTION_EDMO = '4595'
LOCAL_FOLDER = 'latest'
def build_netCDFs(dataset,key,dataset_zip,CP_pid):
''' Returns a list of (netCDF filename, bytes) tuples'''
dataset_name = construct_datafilename(dataset,'CMEMS',key)
platform = get_platform()
# Load field config - contains info on variables to be included in netCDF
fieldconfig = pd.read_csv('fields.csv', delimiter=',', quotechar='\'')
# Read csv file
csv_file = get_file_from_zip(dataset_zip, dataset_name)
filedata = pd.read_csv(csv_file, delimiter=',')
# Remove datarows with nan position
filedata.drop(filedata[ (filedata['ALONGP01'].isnull()) |
(filedata['ALATGP01'].isnull()) ].index,inplace=True)
# Build netCDF from filecontent
nc_files = buildnetcdfs(dataset['name'], fieldconfig, filedata, platform,CP_pid)
return nc_files
def write_nc_bytes_to_file(nc_name,nc_content):
nc_filepath = LOCAL_FOLDER + '/' + nc_name + '.nc'
with open(nc_filepath,'wb') as f: f.write(nc_content)
return nc_filepath
def update_global_attributes(nc):
# Adding history and last update date to global attributes
datasetdate = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
nc.date_update = datasetdate
nc.history = datasetdate + " : Creation"
def create_metadata_object(nc,nc_name,nc_content,nc_filepath,dataset):
''' Creates metadata dictionary object based on each netCDF'''
# Extracting metadata for index-file
platform_code = nc.platform_code
# time and position
last_lat = nc.last_latitude_observation
last_lon = nc.last_longitude_observation
last_dt = nc.last_date_observation
# list of parameters/variables from netCDF file
var_list = nc.variables.keys()
var_list = list(filter(lambda x: '_' not in x, var_list))
var_list = list(filter(lambda x: 'TIME' not in x, var_list))
var_list = list(filter(lambda x: 'LATITUDE' not in x, var_list))
var_list = list(filter(lambda x: 'LONGITUDE' not in x, var_list))
parameters = ' '.join(var_list)
nc.close()
# create metadata dictionary object
date = nc_name.split('_')[-1]
date = datetime.datetime.strptime(date,'%Y%m%d')
hashsum = hashlib.md5(nc_content).hexdigest()
nc_dict = ({
'filepath':nc_filepath,
'hashsum': hashsum,
'date': date,
'dataset':dataset['name'],
'uploaded':False,
'platform': platform_code,
'parameters':parameters,
'last_lat':last_lat,
'last_lon':last_lon,
'last_dt':last_dt})
return nc_dict
def build_DNT(dnt_upload,dnt_delete):
    ''' Generates the delivery note (DNT) for a netCDF file upload.
    The note is required by Copernicus before uploaded .nc files are
    moved to the public FTP server.
    dnt_upload contains the files uploaded to the FTP server
    dnt_delete contains the files to be deleted from the FTP server
    '''
date = datetime.datetime.now().strftime(dnt_datetime_format)
dnt = ET.Element('delivery')
dnt.set('PushingEntity','CopernicusMarine-InSitu-Global')
dnt.set('date', date)
dnt.set('product',PRODUCT_ID)
dataset = ET.SubElement(dnt,'dataset')
dataset.set('DatasetName',DATASET_ID)
# UPLOAD
for item in dnt_upload:
if dnt_upload[item] != {}:
local_filepath = dnt_upload[item]['local_filepath']
ftp_filepath = dnt_upload[item]['ftp_filepath'].split('/',3)[-1]
start_upload_time = dnt_upload[item]['start_upload_time']
stop_upload_time = dnt_upload[item]['stop_upload_time']
with open(local_filepath,'rb') as f:
file_bytes = f.read()
file = ET.SubElement(dataset,'file')
file.set('Checksum',hashlib.md5(file_bytes).hexdigest())
file.set('FileName',ftp_filepath)
file.set('FinalStatus','Delivered')
file.set('StartUploadTime',start_upload_time)
file.set('StopUploadTime',stop_upload_time)
# DELETE
for item in dnt_delete:
if item is not None:
ftp_filepath = dnt_delete[item].split('/',3)[-1]
file_del = ET.SubElement(dataset,'file')
file_del.set('FileName',ftp_filepath)
key_word = ET.SubElement(file_del,'KeyWord')
key_word.text = 'Delete'
xml_tree = ET.ElementTree(dnt)
dnt_file = PRODUCT_ID + '_P' + date + '.xml'
dnt_folder = 'DNT/' + LOCAL_FOLDER + '/'
dnt_filepath = dnt_folder + dnt_file
if not os.path.isdir(dnt_folder): os.mkdir(dnt_folder)
with open(dnt_filepath,'wb') as xml:
xml_tree.write(xml,xml_declaration=True,method='xml')
return dnt_file, dnt_filepath
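# Sketch of the delivery note XML produced by build_DNT (values illustrative):
# <?xml version='1.0' encoding='us-ascii'?>
# <delivery PushingEntity="CopernicusMarine-InSitu-Global"
#           date="2020-10-29T12:00:00Z"
#           product="INSITU_GLO_CARBON_NRT_OBSERVATIONS_013_049">
#   <dataset DatasetName="NRT_202003">
#     <file Checksum="..." FileName="..." FinalStatus="Delivered"
#           StartUploadTime="..." StopUploadTime="..."/>
#     <file FileName="..."><KeyWord>Delete</KeyWord></file>
#   </dataset>
# </delivery>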
def build_fDNT(dnt_delete):
''' Generates delivery note for NetCDF folder clean up '''
date = datetime.datetime.now().strftime(dnt_datetime_format)
dnt = ET.Element('delivery')
dnt.set('PushingEntity','CopernicusMarine-InSitu-Global')
dnt.set('date', date)
dnt.set('product',PRODUCT_ID)
dataset = ET.SubElement(dnt,'dataset')
dataset.set('DatasetName',DATASET_ID)
# delete
for item in dnt_delete:
if item is not None:
ftp_filepath = dnt_delete[item].split('/',3)[-1]
file_del = ET.SubElement(dataset,'directory')
file_del.set('DestinationFolderName','')
file_del.set('SourceFolderName',ftp_filepath.rsplit('/',1)[0])
key_word = ET.SubElement(file_del,'KeyWord')
key_word.text = 'Delete'
xml_tree = ET.ElementTree(dnt)
# logging.debug('DNT file:\n' + str(ET.dump(xml_tree)))
dnt_file = PRODUCT_ID + '_P' + date + '.xml'
dnt_folder = 'DNT/' + LOCAL_FOLDER + '/'
dnt_filepath = dnt_folder + dnt_file
    if not os.path.isdir(dnt_folder): os.mkdir(dnt_folder)
with open(dnt_filepath,'wb') as xml:
xml_tree.write(xml,xml_declaration=True,method='xml')
return dnt_file, dnt_filepath
def build_index(db):
'''
Creates index-file of CMEMS directory.
Lists all files currently uploaded to the CMEMS server.
'''
    index_filename = 'index_latest.txt'
    try:
db.execute("SELECT * FROM latest WHERE uploaded == 1")
currently_uploaded = db.fetchall()
date_header = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
index_header = ('# Title : Carbon in-situ observations catalog \n'\
+ '# Description : catalog of available in-situ observations per platform.\n'\
+ '# Project : Copernicus \n# Format version : 1.0 \n'\
+ '# Date of update : ' + date_header +'\n'
+ '# catalog_id,file_name,geospatial_lat_min,geospatial_lat_max,'\
+ 'geospatial_lon_min,geospatial_lon_max,time_coverage_start,'\
+ 'time_coverage_end,provider,date_update,data_mode,parameters\n')
index_info = ''
for file in currently_uploaded:
local_filepath = file[2]
ftp_filepath = file[6].replace(DATASET_ID,'NRT') # Upload URL differs from host URL /NRT_202003/ --> /NRT/
nc = netCDF4.Dataset(local_filepath,mode='r')
lat_min = nc.geospatial_lat_min
lat_max = nc.geospatial_lat_max
lon_min = nc.geospatial_lon_min
lon_max = nc.geospatial_lon_max
time_start = nc.time_coverage_start
time_end = nc.time_coverage_end
date_update = nc.date_update
nc.close()
parameters = str(file[11])
index_info += ('COP-GLOBAL-01,' + server_location + ftp_filepath + ','
+ lat_min + ',' + lat_max + ',' + lon_min + ',' + lon_max + ','
+ time_start + ',' + time_end + ',' + INSTITUTION +','
+ date_update + ',R,' + parameters + '\n')
index_latest = index_header + index_info
with open(index_filename,'wb') as f: f.write(index_latest.encode())
logging.debug('index file:\n' + index_latest)
    except Exception:
        logging.error('Building index failed: ', exc_info=True)
return index_filename
def build_index_platform(db,platforms,error_msg):
'''
Creates index-file of CMEMS directory.
Lists all platforms uploaded to the CMEMS server.
'''
    index_filename = 'index_platform.txt'
    try:
date_header = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
index_header = ('# Title : In Situ platforms catalog \n'\
+ '# Description : catalog of available In Situ platforms.\n'\
+ '# Project : Copernicus \n# Format version : 1.0 \n'\
+ '# Date of update : ' + date_header +'\n'
+ '# platform_code,creation_date,update_date,wmo_platform_code,data_source,'
+ 'institution,institution_edmo_code,parameter,last_latitude_observation,'
+ 'last_longitude_observation,last_date_observation \n')
# Get unique platforms from db
db.execute("SELECT DISTINCT platform FROM latest")
unique_platforms = db.fetchall()
logging.debug(unique_platforms)
if (None,) in unique_platforms: unique_platforms.remove((None,))
index_info = ''
for unique_platform in unique_platforms:
platform_id = platforms[unique_platform[0]]['platform_id']
# Fetch most recent entry for *platform*
db.execute("SELECT * FROM latest WHERE platform = ? ORDER BY last_dt DESC",
[unique_platform[0]])
db_last = db.fetchone()
index_info += (platforms[platform_id]['call_sign'] + ','
+ str(platforms[platform_id]['creation_date']) + ','
+ str(db_last[9]) + ','
+ platform_id + ','
+ 'GL_TS_TS_' + platforms[platform_id]['call_sign'] + '_XXXXXX,'
+ INSTITUTION + ',' + INSTITUTION_EDMO + ','
+ str(db_last[11]) + ','
+ str(db_last[12]) + ','
+ str(db_last[13]) + ','
+ str(db_last[14]) + '\n')
index_platform = index_header + index_info
with open(index_filename,'wb') as f: f.write(index_platform.encode())
logging.debug('index file:\n' + index_platform)
except Exception as e:
logging.error('Building platform index failed: ', exc_info=True)
error_msg += 'Building platform index failed: ' + str(e)
return index_filename, error_msg
def upload_DNT(dnt_file,dnt_local_filepath,error_msg,ftp,db):
    upload_result, dnt, error_msg = (
upload_to_ftp(ftp, dnt_local_filepath,error_msg,db))
update_db_dnt(db,dnt_local_filepath)
dnt_local_folder = dnt_local_filepath.rsplit('/',1)[0]
dnt_ftp_filename = dnt_local_filepath.rsplit('/',1)[-1]
    response = None
    try:
response = evaluate_response_file(
ftp,dnt_ftp_filename,dnt_local_folder,db)
logging.debug(f'cmems dnt-response: {response}')
except Exception as e:
logging.error('No response from CMEMS: ', exc_info=True)
error_msg += 'No response from CMEMS: ' + str(e)
return response, error_msg
|
squaregoldfish/QuinCe
|
external_scripts/export/modules/CMEMS/Export_CMEMS_metadata.py
|
Python
|
gpl-3.0
| 11,248
|
[
"NetCDF"
] |
184a4ceea3496bb30f3aec2fed6499bf60b3e0faf01d381a8450964adb58e812
|
# -*- coding: utf-8 -*-
import codecs
import json
import os
import tempfile
from django import forms
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from mock import patch
from nose.tools import eq_
from pyquery import PyQuery as pq
from mkt.developers.views import standalone_hosted_upload, trap_duplicate
from mkt.files.helpers import copyfileobj
from mkt.files.models import FileUpload
from mkt.files.tests.test_models import UploadTest as BaseUploadTest
from mkt.files.utils import WebAppParser
from mkt.site.fixtures import fixture
from mkt.site.tests import MktPaths, TestCase
from mkt.site.tests.test_utils_ import get_image_path
from mkt.submit.tests.test_views import BaseWebAppTest
from mkt.users.models import UserProfile
class TestWebApps(TestCase, MktPaths):
def setUp(self):
self.webapp_path = tempfile.mktemp(suffix='.webapp')
with storage.open(self.webapp_path, 'wb') as f:
copyfileobj(open(os.path.join(os.path.dirname(__file__),
'addons', 'mozball.webapp')),
f)
self.tmp_files = []
self.manifest = dict(name=u'Ivan Krsti\u0107', version=u'1.0',
description=u'summary',
developer=dict(name=u'Dev Namé'))
def tearDown(self):
for tmp in self.tmp_files:
storage.delete(tmp)
def webapp(self, data=None, contents='', suffix='.webapp'):
tmp = tempfile.mktemp(suffix=suffix)
self.tmp_files.append(tmp)
with storage.open(tmp, 'wb') as f:
f.write(json.dumps(data) if data else contents)
return tmp
def test_parse(self):
wp = WebAppParser().parse(self.webapp_path)
eq_(wp['guid'], None)
eq_(wp['description']['en-US'],
u'Exciting Open Web development action!')
# UTF-8 byte string decoded to unicode.
eq_(wp['description']['es'],
u'\xa1Acci\xf3n abierta emocionante del desarrollo del Web!')
eq_(wp['description']['it'],
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(wp['version'], '1.0')
eq_(wp['default_locale'], 'en-US')
def test_parse_packaged(self):
wp = WebAppParser().parse(self.packaged_app_path('mozball.zip'))
eq_(wp['guid'], None)
eq_(wp['name']['en-US'], u'Packaged MozillaBall ょ')
eq_(wp['description']['en-US'],
u'Exciting Open Web development action!')
eq_(wp['description']['es'],
u'¡Acción abierta emocionante del desarrollo del Web!')
eq_(wp['description']['it'],
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(wp['version'], '1.0')
eq_(wp['default_locale'], 'en-US')
def test_parse_packaged_BOM(self):
wp = WebAppParser().parse(self.packaged_app_path('mozBOM.zip'))
eq_(wp['guid'], None)
eq_(wp['name']['en-US'], u'Packaged MozBOM ょ')
eq_(wp['description']['en-US'], u'Exciting BOM action!')
eq_(wp['description']['es'], u'¡Acción BOM!')
eq_(wp['description']['it'], u'Azione BOM!')
eq_(wp['version'], '1.0')
eq_(wp['default_locale'], 'en-US')
def test_no_manifest_at_root(self):
with self.assertRaises(forms.ValidationError) as exc:
WebAppParser().parse(
self.packaged_app_path('no-manifest-at-root.zip'))
m = exc.exception.messages[0]
assert m.startswith('The file "manifest.webapp" was not found'), (
'Unexpected: %s' % m)
def test_no_locales(self):
wp = WebAppParser().parse(self.webapp(
dict(name='foo', version='1.0', description='description',
developer=dict(name='bar'))))
eq_(wp['description']['en-US'], u'description')
def test_no_description(self):
wp = WebAppParser().parse(self.webapp(
dict(name='foo', version='1.0', developer=dict(name='bar'))))
eq_(wp['description'], {})
def test_syntax_error(self):
with self.assertRaises(forms.ValidationError) as exc:
WebAppParser().parse(self.webapp(contents='}]'))
m = exc.exception.messages[0]
assert m.startswith('The webapp manifest is not valid JSON.'), (
'Unexpected: %s' % m)
def test_utf8_bom(self):
wm = codecs.BOM_UTF8 + json.dumps(self.manifest, encoding='utf8')
wp = WebAppParser().parse(self.webapp(contents=wm))
eq_(wp['version'], '1.0')
def test_non_ascii(self):
wm = json.dumps(dict(name=u'まつもとゆきひろ', version='1.0',
developer=dict(name=u'まつもとゆきひろ')),
encoding='shift-jis')
wp = WebAppParser().parse(self.webapp(contents=wm))
eq_(wp['name'], {'en-US': u'まつもとゆきひろ'})
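    # Note: the encoding= keyword passed to json.dumps() above exists only in
    # Python 2 (it was removed in Python 3), so these tests are Python 2
    # specific.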
class TestTrapDuplicate(BaseWebAppTest):
def setUp(self):
super(TestTrapDuplicate, self).setUp()
self.create_switch('webapps-unique-by-domain')
self.req = RequestFactory().get('/')
self.req.user = UserProfile.objects.get(pk=999)
@patch('mkt.developers.views.trap_duplicate')
def test_trap_duplicate_skipped_on_standalone(self, trap_duplicate_mock):
self.post()
standalone_hosted_upload(self.req)
assert not trap_duplicate_mock.called
def test_trap_duplicate(self):
self.post_addon()
standalone_hosted_upload(self.req)
assert trap_duplicate(self.req, 'http://allizom.org/mozball.webapp')
class TestStandaloneValidation(BaseUploadTest):
fixtures = fixture('user_999')
def setUp(self):
super(TestStandaloneValidation, self).setUp()
self.login('regular@mozilla.com')
# Upload URLs
self.hosted_upload = reverse(
'mkt.developers.standalone_hosted_upload')
self.packaged_upload = reverse(
'mkt.developers.standalone_packaged_upload')
def hosted_detail(self, uuid):
return reverse('mkt.developers.standalone_upload_detail',
args=['hosted', uuid])
def packaged_detail(self, uuid):
return reverse('mkt.developers.standalone_upload_detail',
args=['packaged', uuid])
def upload_detail(self, uuid):
return reverse('mkt.developers.upload_detail', args=[uuid])
def test_context(self):
res = self.client.get(reverse('mkt.developers.validate_app'))
eq_(res.status_code, 200)
doc = pq(res.content)
eq_(doc('#upload-webapp-url').attr('data-upload-url'),
self.hosted_upload)
eq_(doc('#upload-app').attr('data-upload-url'), self.packaged_upload)
def detail_view(self, url_factory, upload):
res = self.client.get(url_factory(upload.uuid))
res_json = json.loads(res.content)
eq_(res_json['url'], url_factory(upload.uuid))
eq_(res_json['full_report_url'], self.upload_detail(upload.uuid))
res = self.client.get(self.upload_detail(upload.uuid))
eq_(res.status_code, 200)
doc = pq(res.content)
assert doc('header h1').text().startswith('Validation Results for ')
suite = doc('#addon-validator-suite')
# All apps have a `validateurl` value that corresponds to a hosted app.
eq_(suite.attr('data-validateurl'), self.hosted_detail(upload.uuid))
@patch('mkt.developers.tasks._fetch_manifest')
def test_hosted_detail(self, fetch_manifest):
def update_upload(url, upload):
with open(os.path.join(os.path.dirname(__file__),
'addons', 'mozball.webapp'), 'r') as data:
return data.read()
fetch_manifest.side_effect = update_upload
res = self.client.post(
self.hosted_upload, {'manifest': 'http://foo.bar/'}, follow=True)
eq_(res.status_code, 200)
uuid = json.loads(res.content)['upload']
upload = FileUpload.objects.get(uuid=uuid)
eq_(upload.user.pk, 999)
self.detail_view(self.hosted_detail, upload)
def test_packaged_detail(self):
data = open(get_image_path('animated.png'), 'rb')
self.client.post(self.packaged_upload, {'upload': data})
upload = FileUpload.objects.get(name='animated.png')
self.detail_view(self.packaged_detail, upload)
|
clouserw/zamboni
|
mkt/developers/tests/test_views_validation.py
|
Python
|
bsd-3-clause
| 8,515
|
[
"exciting"
] |
8723107ef61d4b94705064bedc1f07e011948835364d0f624442de96594a2be8
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Paraview(CMakePackage):
"""ParaView is an open-source, multi-platform data analysis and
visualization application."""
homepage = 'http://www.paraview.org'
url = "http://www.paraview.org/files/v5.3/ParaView-v5.3.0.tar.gz"
_urlfmt = 'http://www.paraview.org/files/v{0}/ParaView-v{1}{2}.tar.gz'
version('5.4.0', 'b92847605bac9036414b644f33cb7163')
version('5.3.0', '68fbbbe733aa607ec13d1db1ab5eba71')
version('5.2.0', '4570d1a2a183026adb65b73c7125b8b0')
version('5.1.2', '44fb32fc8988fcdfbc216c9e40c3e925')
version('5.0.1', 'fdf206113369746e2276b95b257d2c9b')
version('4.4.0', 'fa1569857dd680ebb4d7ff89c2227378')
variant('plugins', default=True,
description='Install include files for plugins support')
variant('python', default=False, description='Enable Python support')
variant('mpi', default=True, description='Enable MPI support')
variant('osmesa', default=False, description='Enable OSMesa support')
variant('qt', default=False, description='Enable Qt (gui) support')
variant('opengl2', default=True, description='Enable OpenGL2 backend')
depends_on('python@2:2.8', when='+python')
depends_on('py-numpy', when='+python', type='run')
depends_on('py-matplotlib', when='+python', type='run')
depends_on('mpi', when='+mpi')
depends_on('qt', when='@5.3.0:+qt')
depends_on('qt@:4', when='@:5.2.0+qt')
depends_on('bzip2')
depends_on('freetype')
# depends_on('hdf5+mpi', when='+mpi')
# depends_on('hdf5~mpi', when='~mpi')
depends_on('jpeg')
depends_on('libpng')
depends_on('libtiff')
depends_on('libxml2')
# depends_on('netcdf')
# depends_on('netcdf-cxx')
# depends_on('protobuf') # version mismatches?
# depends_on('sqlite') # external version not supported
depends_on('zlib')
depends_on('cmake@3.3:', type='build')
patch('stl-reader-pv440.patch', when='@4.4.0')
# Broken gcc-detection - improved in 5.1.0, redundant later
patch('gcc-compiler-pv501.patch', when='@:5.0.1')
# Broken installation (ui_pqExportStateWizard.h) - fixed in 5.2.0
patch('ui_pqExportStateWizard.patch', when='@:5.1.2')
def url_for_version(self, version):
"""Handle ParaView version-based custom URLs."""
if version < Version('5.1.0'):
return self._urlfmt.format(version.up_to(2), version, '-source')
else:
return self._urlfmt.format(version.up_to(2), version, '')
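    # e.g. url_for_version(Version('5.0.1')) returns
    # 'http://www.paraview.org/files/v5.0/ParaView-v5.0.1-source.tar.gz';
    # 5.1.0 and later drop the '-source' suffix.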
def cmake_args(self):
"""Populate cmake arguments for ParaView."""
spec = self.spec
def variant_bool(feature, on='ON', off='OFF'):
"""Ternary for spec variant to ON/OFF string"""
if feature in spec:
return on
return off
def nvariant_bool(feature):
"""Negated ternary for spec variant to OFF/ON string"""
return variant_bool(feature, on='OFF', off='ON')
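        # Illustrative: for a spec such as 'paraview+qt~osmesa',
        # variant_bool('+qt') evaluates to 'ON' and
        # nvariant_bool('+osmesa') evaluates to 'ON'.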
rendering = variant_bool('+opengl2', 'OpenGL2', 'OpenGL')
includes = variant_bool('+plugins')
cmake_args = [
'-DPARAVIEW_BUILD_QT_GUI:BOOL=%s' % variant_bool('+qt'),
'-DVTK_OPENGL_HAS_OSMESA:BOOL=%s' % variant_bool('+osmesa'),
'-DVTK_USE_X:BOOL=%s' % nvariant_bool('+osmesa'),
'-DVTK_RENDERING_BACKEND:STRING=%s' % rendering,
'-DPARAVIEW_INSTALL_DEVELOPMENT_FILES:BOOL=%s' % includes,
'-DBUILD_TESTING:BOOL=OFF',
'-DVTK_USE_SYSTEM_FREETYPE:BOOL=ON',
'-DVTK_USE_SYSTEM_HDF5:BOOL=OFF',
'-DVTK_USE_SYSTEM_JPEG:BOOL=ON',
'-DVTK_USE_SYSTEM_LIBXML2:BOOL=ON',
'-DVTK_USE_SYSTEM_NETCDF:BOOL=OFF',
'-DVTK_USE_SYSTEM_TIFF:BOOL=ON',
'-DVTK_USE_SYSTEM_ZLIB:BOOL=ON',
]
# The assumed qt version changed to QT5 (as of paraview 5.2.1),
# so explicitly specify which QT major version is actually being used
if '+qt' in spec:
cmake_args.extend([
'-DPARAVIEW_QT_VERSION=%s' % spec['qt'].version[0],
])
if '+python' in spec:
cmake_args.extend([
'-DPARAVIEW_ENABLE_PYTHON:BOOL=ON',
'-DPYTHON_EXECUTABLE:FILEPATH=%s' % spec['python'].command.path
])
if '+mpi' in spec:
cmake_args.extend([
'-DPARAVIEW_USE_MPI:BOOL=ON',
'-DMPIEXEC:FILEPATH=%s/bin/mpiexec' % spec['mpi'].prefix
])
if 'darwin' in self.spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DPARAVIEW_DO_UNIX_STYLE_INSTALLS:BOOL=ON',
])
return cmake_args
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/paraview/package.py
|
Python
|
lgpl-2.1
| 5,978
|
[
"NetCDF",
"ParaView"
] |
28a51f207db6f0c8c6abf47867c76a6cdc35d17a87a4ec09f69e0d8f91ba2ad3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010-2016 Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Classes:
NavWebReport - main class that produces the report. Entry point to produce
the report is write_report
NavWebOptions - class that defines the options and provides the handling
interface
BasePage - super class for producing a web page. This class is instantiated
once for each page. Provides various common functions.
Classes for producing the web pages:
SurnamePage - creates list of individuals with same surname
FamilyPage - Family index page and individual Family pages
PlacePage - Place index page and individual Place pages
EventPage - Event index page and individual Event pages
SurnameListPage - Index for first letters of surname
IntroductionPage
HomePage
CitationPages - dummy
SourcePage - Source index page and individual Source pages
MediaPage - Media index page and individual Media pages
ThumbnailPreviewPage
DownloadPage
ContactPage
PersonPage - Person index page and individual Person pages
RepositoryPage - Repository index page and individual Repository pages
AddressBookListPage
AddressBookPage
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from functools import partial
import gc
import os
import sys
import re
import copy
from hashlib import md5
import time, datetime
import shutil
import tarfile
import tempfile
from io import BytesIO, TextIOWrapper
from unicodedata import normalize
from collections import defaultdict
from xml.sax.saxutils import escape
from operator import itemgetter
from decimal import Decimal, getcontext
getcontext().prec = 8
#------------------------------------------------
# Set up logging
#------------------------------------------------
import logging
LOG = logging.getLogger(".NarrativeWeb")
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.lib import (ChildRefType, Date, EventType, FamilyRelType, Name,
NameType, Person, UrlType, NoteType, PlaceType,
EventRoleType, Family, Event, Place, Source,
Citation, Media, Repository, Note, Tag)
from gramps.gen.lib.date import Today
from gramps.gen.const import PROGRAM_NAME, URL_HOMEPAGE
from gramps.version import VERSION
from gramps.gen.plug.menu import (PersonOption, NumberOption, StringOption,
BooleanOption, EnumeratedListOption,
FilterOption, NoteOption, MediaOption,
DestinationOption)
from gramps.gen.plug.report import (Report, Bibliography)
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.utils.config import get_researcher
from gramps.gen.utils.string import conf_strings
from gramps.gen.utils.file import media_path_full
from gramps.gen.utils.alive import probably_alive
from gramps.gen.constfunc import win, get_curr_dir
from gramps.gen.config import config
from gramps.gen.utils.thumbnails import get_thumbnail_path, run_thumbnailer
from gramps.gen.utils.image import image_size # , resize_to_jpeg_buffer
from gramps.gen.display.name import displayer as _nd
from gramps.gen.display.place import displayer as _pd
from gramps.gen.proxy import CacheProxyDb
from gramps.plugins.lib.libhtmlconst import _CHARACTER_SETS, _CC, _COPY_OPTIONS
from gramps.gen.datehandler import get_date
# import HTML Class from src/plugins/lib/libhtml.py
from gramps.plugins.lib.libhtml import Html, xml_lang
# import styled notes from src/plugins/lib/libhtmlbackend.py
from gramps.plugins.lib.libhtmlbackend import HtmlBackend, process_spaces
from gramps.plugins.lib.libgedcom import make_gedcom_date, DATE_QUALITY
from gramps.gen.utils.place import conv_lat_lon
from gramps.gen.plug import BasePluginManager
from gramps.gen.relationship import get_relationship_calculator
from gramps.gen.utils.location import get_main_location
COLLATE_LANG = glocale.collation
SORT_KEY = glocale.sort_key
#------------------------------------------------
# Everything below this point is identical for gramps34 (post 3.4.2),
# gramps40 and trunk
#------------------------------------------------
#------------------------------------------------
# constants
#------------------------------------------------
GOOGLE_MAPS = 'https://maps.googleapis.com/maps/'
# javascript code for marker path
MARKER_PATH = """
var marker_png = '%s'
"""
# javascript code for Google's FamilyLinks...
FAMILYLINKS = """
var tracelife = %s
function initialize() {
var myLatLng = new google.maps.LatLng(%s, %s);
var mapOptions = {
scaleControl: true,
panControl: true,
backgroundColor: '#000000',
zoom: %d,
center: myLatLng,
mapTypeId: google.maps.MapTypeId.ROADMAP
};
var map = new google.maps.Map(document.getElementById("map_canvas"),
mapOptions);
var flightPath = new google.maps.Polyline({
path: tracelife,
strokeColor: "#FF0000",
strokeOpacity: 1.0,
strokeWeight: 2
});
flightPath.setMap(map);
}"""
# javascript for Google's Drop Markers...
DROPMASTERS = """
var markers = [];
var iterator = 0;
var tracelife = %s
var map;
var myLatLng = new google.maps.LatLng(%s, %s);
function initialize() {
var mapOptions = {
scaleControl: true,
zoomControl: true,
zoom: %d,
mapTypeId: google.maps.MapTypeId.ROADMAP,
center: myLatLng,
};
map = new google.maps.Map(document.getElementById("map_canvas"),
mapOptions);
};
function drop() {
for (var i = 0; i < tracelife.length; i++) {
setTimeout(function() {
addMarker();
}, i * 1000);
}
}
function addMarker() {
var location = tracelife[iterator];
var myLatLng = new google.maps.LatLng(location[1], location[2]);
markers.push(new google.maps.Marker({
position: myLatLng,
map: map,
draggable: true,
title: location[0],
animation: google.maps.Animation.DROP
}));
iterator++;
}"""
# javascript for Google's Markers...
MARKERS = """
var tracelife = %s
var map;
var myLatLng = new google.maps.LatLng(%s, %s);
function initialize() {
var mapOptions = {
scaleControl: true,
panControl: true,
backgroundColor: '#000000',
zoom: %d,
center: myLatLng,
mapTypeId: google.maps.MapTypeId.ROADMAP
};
map = new google.maps.Map(document.getElementById("map_canvas"),
mapOptions);
addMarkers();
}
function addMarkers() {
var bounds = new google.maps.LatLngBounds();
for (var i = 0; i < tracelife.length; i++) {
var location = tracelife[i];
var myLatLng = new google.maps.LatLng(location[1], location[2]);
var marker = new google.maps.Marker({
position: myLatLng,
draggable: true,
title: location[0],
map: map,
zIndex: location[3]
});
bounds.extend(myLatLng);
if ( i > 1 ) { map.fitBounds(bounds); };
}
}"""
# javascript for OpenStreetMap's markers...
OSM_MARKERS = """
function initialize(){
var map;
var tracelife = %s;
var iconStyle = new ol.style.Style({
image: new ol.style.Icon(({
opacity: 1.0,
src: marker_png
}))
});
var markerSource = new ol.source.Vector({
});
for (var i = 0; i < tracelife.length; i++) {
var loc = tracelife[i];
var iconFeature = new ol.Feature({
geometry: new ol.geom.Point(ol.proj.transform([loc[0], loc[1]],
'EPSG:4326', 'EPSG:3857')),
name: loc[2],
});
iconFeature.setStyle(iconStyle);
markerSource.addFeature(iconFeature);
}
markerLayer = new ol.layer.Vector({
source: markerSource,
style: iconStyle
});
var centerCoord = new ol.proj.transform([%s, %s], 'EPSG:4326', 'EPSG:3857');
map= new ol.Map({
target: 'map_canvas',
layers: [new ol.layer.Tile({ source: new ol.source.OSM() }),
markerLayer],
view: new ol.View({ center: centerCoord, zoom: %d })
});
var element = document.getElementById('popup');
var tooltip = new ol.Overlay({
element: element,
positioning: 'bottom-center',
stopEvent: false
});
map.addOverlay(tooltip);
var displayFeatureInfo = function(pixel) {
var feature = map.forEachFeatureAtPixel(pixel, function(feature, layer) {
return feature;
});
var info = document.getElementById('popup');
if (feature) {
var geometry = feature.getGeometry();
var coord = geometry.getCoordinates();
tooltip.setPosition(coord);
$(element).siblings('.popover').css({ width: '250px' });
$(element).siblings('.popover').css({ background: '#aaa' });
$(info).popover({
'placement': 'auto',
'html': true,
'content': feature.get('name')
});
$(info).popover('show');
} else {
// TODO : some warning with firebug here
$(info).popover('destroy');
$('.popover').remove();
}
};
map.on('pointermove', function(evt) {
if (evt.dragging) {
return;
}
var pixel = map.getEventPixel(evt.originalEvent);
displayFeatureInfo(pixel);
});
map.on('click', function(evt) {
displayFeatureInfo(evt.pixel);
});
};
"""
# there is no need to add an ending "</script>",
# as it will be added automatically by libhtml()
# Events that are usually a family event
_EVENTMAP = set([EventType.MARRIAGE, EventType.MARR_ALT,
EventType.MARR_SETTL, EventType.MARR_LIC,
EventType.MARR_CONTR, EventType.MARR_BANNS,
EventType.ENGAGEMENT, EventType.DIVORCE,
EventType.DIV_FILING])
# define clear blank line for proper styling
FULLCLEAR = Html("div", class_="fullclear", inline=True)
# Names for stylesheets
_NARRATIVESCREEN = "narrative-screen.css"
_NARRATIVEPRINT = "narrative-print.css"
# variables for alphabet_navigation()
_KEYPERSON, _KEYPLACE, _KEYEVENT, _ALPHAEVENT = 0, 1, 2, 3
# Web page filename extensions
_WEB_EXT = ['.html', '.htm', '.shtml', '.php', '.php3', '.cgi']
_NAME_COL = 3
_DEFAULT_MAX_IMG_WIDTH = 800 # resize images that are wider than this
_DEFAULT_MAX_IMG_HEIGHT = 600 # resize images that are taller than this
# The two values above are settable in options.
_WIDTH = 160
_HEIGHT = 64
_VGAP = 10
_HGAP = 30
_SHADOW = 5
_XOFFSET = 5
_WRONGMEDIAPATH = []
_NAME_STYLE_SHORT = 2
_NAME_STYLE_DEFAULT = 1
_NAME_STYLE_FIRST = 0
_NAME_STYLE_SPECIAL = None
PLUGMAN = BasePluginManager.get_instance()
CSS = PLUGMAN.process_plugin_data('WEBSTUFF')
_HTML_DBL_QUOTES = re.compile(r'([^"]*) " ([^"]*) " (.*)', re.VERBOSE)
_HTML_SNG_QUOTES = re.compile(r"([^']*) ' ([^']*) ' (.*)", re.VERBOSE)
# The patterns above are used by the 'html_escape' function below to
# replace special characters for presentation in HTML.
def html_escape(text):
"""Convert the text and replace some characters with a &# variant."""
# First single characters, no quotes
text = escape(text)
# Deal with double quotes.
match = _HTML_DBL_QUOTES.match(text)
while match:
text = "%s" "“" "%s" "”" "%s" % match.groups()
match = _HTML_DBL_QUOTES.match(text)
# Replace remaining double quotes.
text = text.replace('"', '"')
# Deal with single quotes.
text = text.replace("'s ", '’s ')
match = _HTML_SNG_QUOTES.match(text)
while match:
text = "%s" "‘" "%s" "’" "%s" % match.groups()
match = _HTML_SNG_QUOTES.match(text)
# Replace remaining single quotes.
text = text.replace("'", ''')
return text
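# Illustrative: html_escape('a "quoted" <b>') returns
# 'a &#8220;quoted&#8221; &lt;b&gt;': paired straight quotes become
# typographic-quote entities and remaining specials are escaped.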
def name_to_md5(text):
"""This creates an MD5 hex string to be used as filename."""
return md5(text.encode('utf-8')).hexdigest()
def conf_priv(obj):
"""
Return private string
@param: obj -- The object reference
"""
if obj.get_privacy() != 0:
return ' priv="%d"' % obj.get_privacy()
else:
return ''
def get_gendex_data(database, event_ref):
"""
    Given an event, return the date and place as strings
@param: database -- The database
@param: event_ref -- The event reference
"""
doe = "" # date of event
poe = "" # place of event
if event_ref and event_ref.ref:
event = database.get_event_from_handle(event_ref.ref)
if event:
date = event.get_date_object()
doe = format_date(date)
if event.get_place_handle():
place_handle = event.get_place_handle()
if place_handle:
place = database.get_place_from_handle(place_handle)
if place:
poe = _pd.display(database, place, date)
return doe, poe
def format_date(date):
"""
Format the date
"""
start = date.get_start_date()
if start != Date.EMPTY:
cal = date.get_calendar()
mod = date.get_modifier()
quality = date.get_quality()
if quality in DATE_QUALITY:
qual_text = DATE_QUALITY[quality] + " "
else:
qual_text = ""
if mod == Date.MOD_SPAN:
val = "%sFROM %s TO %s" % (
qual_text,
make_gedcom_date(start, cal, mod, None),
make_gedcom_date(date.get_stop_date(), cal, mod, None))
elif mod == Date.MOD_RANGE:
val = "%sBET %s AND %s" % (
qual_text,
make_gedcom_date(start, cal, mod, None),
make_gedcom_date(date.get_stop_date(), cal, mod, None))
else:
val = make_gedcom_date(start, cal, mod, quality)
return val
return ""
# pylint: disable=unused-variable
# pylint: disable=unused-argument
class BasePage:
"""
Manages all the functions, variables, and everything needed
for all of the classes contained within this plugin
"""
def __init__(self, report, title, gid=None):
"""
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: gid -- The family gramps ID
"""
self.uplink = False
# class to do conversion of styled notes to html markup
self._backend = HtmlBackend()
self._backend.build_link = report.build_link
self.report = report
self.r_db = report.database
self.r_user = report.user
self.title_str = title
self.gid = gid
self.bibli = Bibliography()
self.page_title = ""
self.author = get_researcher().get_name()
if self.author:
self.author = self.author.replace(',,,', '')
# TODO. All of these attributes are not necessary, because we have
# also the options in self.options. Besides, we need to check which
# are still required.
self.html_dir = report.options['target']
self.ext = report.options['ext']
self.noid = report.options['nogid']
self.linkhome = report.options['linkhome']
self.create_media = report.options['gallery']
self.create_unused_media = report.options['unused']
self.create_thumbs_only = report.options['create_thumbs_only']
self.inc_families = report.options['inc_families']
self.inc_events = report.options['inc_events']
self.usecms = report.options['usecms']
self.target_uri = report.options['cmsuri']
self.usecal = report.options['usecal']
self.target_cal_uri = report.options['caluri']
self.familymappages = None
lang = report.options['trans']
self.rlocale = report.set_locale(lang)
self._ = self.rlocale.translation.sgettext
# Functions used when no Web Page plugin is provided
def add_instance(self, *param):
"""
Add an instance
"""
pass
def display_pages(self, title):
"""
Display the pages
"""
pass
def sort_on_name_and_grampsid(self, handle):
""" Used to sort on name and gramps ID. """
person = self.r_db.get_person_from_handle(handle)
name = _nd.display(person)
return (name, person.get_gramps_id())
def sort_on_grampsid(self, event_ref):
"""
Sort on gramps ID
"""
evt = self.r_db.get_event_from_handle(
event_ref.ref)
return evt.get_gramps_id()
def copy_thumbnail(self, handle, photo, region=None):
"""
Given a handle (and optional region) make (if needed) an
up-to-date cache of a thumbnail, and call report.copy_file
to copy the cached thumbnail to the website.
Return the new path to the image.
"""
to_dir = self.report.build_path('thumb', handle)
to_path = os.path.join(to_dir, handle) + (
('%d,%d-%d,%d.png' % region) if region else '.png'
)
if photo.get_mime_type():
full_path = media_path_full(self.r_db, photo.get_path())
from_path = get_thumbnail_path(full_path,
photo.get_mime_type(),
region)
if not os.path.isfile(from_path):
from_path = CSS["Document"]["filename"]
else:
from_path = CSS["Document"]["filename"]
self.report.copy_file(from_path, to_path)
return to_path
def get_nav_menu_hyperlink(self, url_fname, nav_text):
"""
Returns the navigation menu hyperlink
"""
if url_fname == self.target_cal_uri:
uplink = False
else:
uplink = self.uplink
# check for web page file extension?
if not _has_webpage_extension(url_fname):
url_fname += self.ext
# get menu item url and begin hyperlink...
url = self.report.build_url_fname(url_fname, None, uplink)
return Html("a", nav_text, href=url, title=nav_text, inline=True)
def get_column_data(self, unordered, data_list, column_title):
"""
Returns the menu column for Drop Down Menus and Drop Down Citations
"""
if len(data_list) == 0:
return
elif len(data_list) == 1:
url_fname, nav_text = data_list[0][0], data_list[0][1]
hyper = self.get_nav_menu_hyperlink(url_fname, nav_text)
unordered.extend(
Html("li", hyper, inline=True)
)
else:
col_list = Html("li") + (
Html("a", column_title, href="#",
title=column_title, inline=True)
)
unordered += col_list
unordered1 = Html("ul")
col_list += unordered1
for url_fname, nav_text in data_list:
hyper = self.get_nav_menu_hyperlink(url_fname, nav_text)
unordered1.extend(Html("li", hyper, inline=True))
def display_relationships(self, individual, place_lat_long):
"""
Displays a person's relationships ...
        @param: individual     -- The person whose families are displayed
@param: place_lat_long -- for use in Family Map Pages. This will be None
if called from Family pages, which do not create a Family Map
"""
family_list = individual.get_family_handle_list()
if not family_list:
return None
with Html("div", class_="subsection", id="families") as section:
section += Html("h4", self._("Families"), inline=True)
table_class = "infolist"
if len(family_list) > 1:
table_class += " fixed_subtables"
with Html("table", class_=table_class) as table:
section += table
for family_handle in family_list:
family = self.r_db.get_family_from_handle(family_handle)
if family:
link = self.family_link(
family_handle,
self.report.obj_dict[Family][family_handle][1],
gid=family.get_gramps_id(), uplink=True)
trow = Html("tr", class_="BeginFamily") + (
Html("td", " ", class_="ColumnType",
inline=True),
Html("td", " ", class_="ColumnAttribute",
inline=True),
Html("td", link, class_="ColumnValue",
inline=True)
)
table += trow
# find the spouse of the principal individual and
# display that person
sp_hdl = utils.find_spouse(individual, family)
if sp_hdl:
spouse = self.r_db.get_person_from_handle(sp_hdl)
if spouse:
table += self.display_spouse(spouse, family,
place_lat_long)
details = self.display_family_details(family,
place_lat_long)
if details is not None:
table += details
return section
def display_family_relationships(self, family, place_lat_long):
"""
Displays a family's relationships ...
@param: family -- the family to be displayed
@param: place_lat_long -- for use in Family Map Pages. This will be None
if called from Family pages, which do not create a Family Map
"""
with Html("div", class_="subsection", id="families") as section:
section += Html("h4", self._("Families"), inline=True)
table_class = "infolist"
with Html("table", class_=table_class) as table:
section += table
for person_hdl in [family.get_father_handle(),
family.get_mother_handle()]:
person = None
if person_hdl:
person = self.r_db.get_person_from_handle(person_hdl)
if person:
table += self.display_spouse(person,
family, place_lat_long)
details = self.display_family_details(family, place_lat_long)
if details is not None:
table += details
return section
def display_family_details(self, family, place_lat_long):
"""
Display details about one family: family events, children, family LDS
ordinances, family attributes
"""
table = None
birthorder = self.report.options["birthorder"]
# display family events; such as marriage and divorce...
family_events = family.get_event_ref_list()
if family_events:
trow = Html("tr") + (
Html("td", " ", class_="ColumnType", inline=True),
Html("td", " ", class_="ColumnAttribute", inline=True),
Html("td", self.format_family_events(family_events,
place_lat_long),
class_="ColumnValue")
)
table = trow
# If the families pages are not output, display family notes
if not self.inc_families:
notelist = family.get_note_list()
for notehandle in notelist:
note = self.r_db.get_note_from_handle(notehandle)
if note:
trow = Html("tr") + (
Html("td", " ", class_="ColumnType", inline=True),
Html("td", self._("Narrative"),
class_="ColumnAttribute",
inline=True),
Html("td", self.get_note_format(note, True),
class_="ColumnValue")
)
table = table + trow if table is not None else trow
childlist = family.get_child_ref_list()
if childlist:
trow = Html("tr") + (
Html("td", " ", class_="ColumnType", inline=True),
Html("td", self._("Children"), class_="ColumnAttribute",
inline=True)
)
table = table + trow if table is not None else trow
tcell = Html("td", class_="ColumnValue")
trow += tcell
ordered = Html("ol")
tcell += ordered
childlist = [child_ref.ref for child_ref in childlist]
# add individual's children event places to family map...
if self.familymappages:
for handle in childlist:
child = self.r_db.get_person_from_handle(handle)
if child:
self._get_event_place(child, place_lat_long)
children = add_birthdate(self.r_db, childlist)
if birthorder:
children = sorted(children)
ordered.extend(
(Html("li") + self.display_child_link(chandle))
for birth_date, chandle in children
)
# family LDS ordinance list
family_lds_ordinance_list = family.get_lds_ord_list()
if family_lds_ordinance_list:
trow = Html("tr") + (
Html("td", " ", class_="ColumnType", inline=True),
Html("td", self._("LDS Ordinance"), class_="ColumnAttribute",
inline=True),
Html("td", self.dump_ordinance(family, "Family"),
class_="ColumnValue")
)
table = table + trow if table is not None else trow
# Family Attribute list
family_attribute_list = family.get_attribute_list()
if family_attribute_list:
trow = Html("tr") + (
Html("td", " ", class_="ColumnType", inline=True),
Html("td", self._("Attributes"), class_="ColumnAttribute",
inline=True)
)
table = table + trow if table is not None else trow
tcell = Html("td", class_="ColumnValue")
trow += tcell
# we do not need the section variable for this instance
# of Attributes...
dummy, attrtable = self.display_attribute_header()
tcell += attrtable
self.display_attr_list(family_attribute_list, attrtable)
return table
def complete_people(self, tcell, first_person, handle_list, uplink=True):
"""
completes the person column for classes EventListPage and EventPage
@param: tcell -- table cell from its caller
@param: first_person -- Not used any more, done via css
@param: handle_list -- handle list from the backlink of the event_handle
"""
for (classname, handle) in handle_list:
# personal event
if classname == "Person":
tcell += Html("span", self.new_person_link(handle, uplink),
class_="person", inline=True)
# family event
else:
_obj = self.r_db.get_family_from_handle(handle)
if _obj:
# husband and spouse in this example,
# are called father and mother
husband_handle = _obj.get_father_handle()
if husband_handle:
hlink = self.new_person_link(husband_handle, uplink)
spouse_handle = _obj.get_mother_handle()
if spouse_handle:
slink = self.new_person_link(spouse_handle, uplink)
if spouse_handle and husband_handle:
tcell += Html("span", hlink, class_="father",
inline=True)
tcell += Html("span", slink, class_="mother",
inline=True)
elif spouse_handle:
tcell += Html("span", slink, class_="mother",
inline=True)
elif husband_handle:
tcell += Html("span", hlink, class_="father",
inline=True)
return tcell
def dump_attribute(self, attr):
"""
dump attribute for object presented in display_attr_list()
@param: attr = attribute object
"""
trow = Html("tr")
trow.extend(
Html("td", data or " ", class_=colclass,
                 inline=(colclass in ("ColumnType", "ColumnSources")))
for (data, colclass) in [
(str(attr.get_type()), "ColumnType"),
(attr.get_value(), "ColumnValue"),
(self.dump_notes(attr.get_note_list()), "ColumnNotes"),
(self.get_citation_links(attr.get_citation_list()),
"ColumnSources")
]
)
return trow
def get_citation_links(self, citation_handle_list):
"""
get citation link from the citation handle list
@param: citation_handle_list = list of gen/lib/Citation
"""
text = ""
for citation_handle in citation_handle_list:
citation = self.r_db.get_citation_from_handle(citation_handle)
if citation:
index, key = self.bibli.add_reference(citation)
id_ = "%d%s" % (index+1, key)
text += ' <a href="#sref%s">%s</a>' % (id_, id_)
return text
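    # Illustrative: the first citation added to the bibliography produces link
    # text like ' <a href="#sref1a">1a</a>' (index 1 plus a reference key).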
def get_note_format(self, note, link_prefix_up):
"""
will get the note from the database, and will return either the
styled text or plain note
"""
self.report.link_prefix_up = link_prefix_up
text = ""
if note is not None:
# retrieve the body of the note
note_text = note.get()
# styled notes
htmlnotetext = self.styled_note(
note.get_styledtext(), note.get_format(),
contains_html=(note.get_type() == NoteType.HTML_CODE))
text = htmlnotetext or Html("p", note_text)
# return text of the note to its callers
return text
def styled_note(self, styledtext, styled_format, contains_html=False):
"""
        styledtext    : assumed a StyledText object to write
        styled_format : = 0 : Flowed, = 1 : Preformatted
        contains_html : True if the note contains raw HTML markup
"""
text = str(styledtext)
if not text:
return ''
s_tags = styledtext.get_tags()
htmllist = Html("div", class_="grampsstylednote")
if contains_html:
markuptext = self._backend.add_markup_from_styled(text,
s_tags,
split='\n',
escape=False)
htmllist += markuptext
else:
markuptext = self._backend.add_markup_from_styled(text,
s_tags,
split='\n')
linelist = []
linenb = 1
for line in markuptext.split('\n'):
[line, sigcount] = process_spaces(line, styled_format)
if sigcount == 0:
# The rendering of an empty paragraph '<p></p>'
# is undefined so we use a non-breaking space
if linenb == 1:
linelist.append(' ')
htmllist.extend(Html('p') + linelist)
linelist = []
linenb = 1
else:
if linenb > 1:
linelist[-1] += '<br />'
linelist.append(line)
linenb += 1
if linenb > 1:
htmllist.extend(Html('p') + linelist)
# if the last line was blank, then as well as outputting
# the previous para, which we have just done,
# we also output a new blank para
if sigcount == 0:
linelist = [" "]
htmllist.extend(Html('p') + linelist)
return htmllist
def dump_notes(self, notelist):
"""
dump out of list of notes with very little elements of its own
@param: notelist -- list of notes
"""
if not notelist:
return Html("div")
# begin unordered list
notesection = Html("div")
for notehandle in notelist:
this_note = self.r_db.get_note_from_handle(notehandle)
if this_note is not None:
notesection.extend(Html("i", str(this_note.type),
class_="NoteType"))
notesection.extend(self.get_note_format(this_note, True))
return notesection
def event_header_row(self):
"""
creates the event header row for all events
"""
trow = Html("tr")
trow.extend(
Html("th", trans, class_=colclass, inline=True)
for trans, colclass in [
(self._("Event"), "ColumnEvent"),
(self._("Date"), "ColumnDate"),
(self._("Place"), "ColumnPlace"),
(self._("Description"), "ColumnDescription"),
(self._("Notes"), "ColumnNotes"),
(self._("Sources"), "ColumnSources")]
)
return trow
def display_event_row(self, event, event_ref, place_lat_long,
uplink, hyperlink, omit):
"""
display the event row for IndividualPage
@param: evt -- Event object from report database
@param: evt_ref -- Event reference
@param: place_lat_long -- For use in Family Map Pages. This will be None
if called from Family pages, which do not
create a Family Map
@param: uplink -- If True, then "../../../" is inserted in front
of the result.
@param: hyperlink -- Add a hyperlink or not
@param: omit -- Role to be omitted in output
"""
event_gid = event.get_gramps_id()
place_handle = event.get_place_handle()
if place_handle:
place = self.r_db.get_place_from_handle(place_handle)
if place:
self.append_to_place_lat_long(place, event, place_lat_long)
# begin event table row
trow = Html("tr")
# get event type and hyperlink to it or not?
etype = self._(event.get_type().xml_str())
event_role = event_ref.get_role()
if not event_role == omit:
etype += " (%s)" % event_role
event_hyper = self.event_link(event_ref.ref,
etype,
event_gid,
uplink) if hyperlink else etype
trow += Html("td", event_hyper, class_="ColumnEvent")
# get event data
event_data = self.get_event_data(event, event_ref, uplink)
trow.extend(
Html("td", data or " ", class_=colclass,
inline=(not data or colclass == "ColumnDate"))
for (label, colclass, data) in event_data
)
# get event notes
notelist = event.get_note_list()
notelist.extend(event_ref.get_note_list())
htmllist = self.dump_notes(notelist)
# if the event or event reference has an attribute attached to it,
# get the text and format it correctly?
attrlist = event.get_attribute_list()
attrlist.extend(event_ref.get_attribute_list())
for attr in attrlist:
htmllist.extend(Html("p",
_("%(type)s: %(value)s") % {
'type' : Html("b", attr.get_type()),
'value' : attr.get_value()
}))
#also output notes attached to the attributes
notelist = attr.get_note_list()
if notelist:
htmllist.extend(self.dump_notes(notelist))
trow += Html("td", htmllist, class_="ColumnNotes")
# get event source references
srcrefs = self.get_citation_links(event.get_citation_list()) or " "
trow += Html("td", srcrefs, class_="ColumnSources")
# return events table row to its callers
return trow
def append_to_place_lat_long(self, place, event, place_lat_long):
"""
Create a list of places with coordinates.
@param: place_lat_long -- for use in Family Map Pages. This will be None
if called from Family pages, which do not create a Family Map
"""
if place_lat_long is None:
return
place_handle = place.get_handle()
event_date = event.get_date_object()
# 0 = latitude, 1 = longitude, 2 - placetitle,
# 3 = place handle, 4 = event date, 5 = event type
found = any(data[3] == place_handle and data[4] == event_date
for data in place_lat_long)
if not found:
placetitle = _pd.display(self.r_db, place)
latitude = place.get_latitude()
longitude = place.get_longitude()
if latitude and longitude:
latitude, longitude = conv_lat_lon(latitude, longitude, "D.D8")
if latitude is not None:
etype = event.get_type()
# only allow Birth, Death, Census, Marriage,
# and Divorce events...
if etype in [EventType.BIRTH, EventType.DEATH,
EventType.CENSUS,
EventType.MARRIAGE, EventType.DIVORCE]:
place_lat_long.append([latitude, longitude, placetitle,
place_handle, event_date, etype])
def _get_event_place(self, person, place_lat_long):
"""
Retrieve from a person their events, and places for family map
@param: person -- Person object from the database
@param: place_lat_long -- For use in Family Map Pages. This will be
None if called from Family pages, which do
not create a Family Map
"""
if not person:
return
# check to see if this person is in the report database?
use_link = self.report.person_in_webreport(person.get_handle())
if use_link:
evt_ref_list = person.get_event_ref_list()
if evt_ref_list:
for evt_ref in evt_ref_list:
event = self.r_db.get_event_from_handle(evt_ref.ref)
if event:
pl_handle = event.get_place_handle()
if pl_handle:
place = self.r_db.get_place_from_handle(pl_handle)
if place:
self.append_to_place_lat_long(place, event,
place_lat_long)
def family_link(self, family_handle, name, gid=None, uplink=False):
"""
Create the url and link for FamilyPage
@param: family_handle -- The handle for the family to link
@param: name -- The family name
@param: gid -- The family gramps ID
@param: uplink -- If True, then "../../../" is inserted in front
of the result.
"""
name = html_escape(name)
if not self.noid and gid:
gid_html = Html("span", " [%s]" % gid, class_="grampsid",
inline=True)
else:
gid_html = ""
result = self.report.obj_dict.get(Family).get(family_handle)
if result is None:
# the family is not included in the webreport
return name + str(gid_html)
url = self.report.build_url_fname(result[0], uplink=uplink)
hyper = Html("a", name, href=url, title=name)
hyper += gid_html
return hyper
def get_family_string(self, family):
"""
Unused method ???
Returns a hyperlink for each person linked to the Family Page
@param: family -- The family
"""
husband_handle = family.get_father_handle()
if husband_handle:
husband = self.r_db.get_person_from_handle(husband_handle)
else:
husband = None
spouse_handle = family.get_mother_handle()
if spouse_handle:
spouse = self.r_db.get_person_from_handle(spouse_handle)
else:
spouse = None
if husband:
husband_name = self.get_name(husband)
hlink = self.family_link(family.get_handle(),
husband_name, uplink=self.uplink)
if spouse:
spouse_name = self.get_name(spouse)
slink = self.family_link(family.get_handle(),
spouse_name, uplink=self.uplink)
title_str = ''
if husband and spouse:
title_str = '%s ' % hlink + self._("and") + ' %s' % slink
elif husband:
title_str = '%s ' % hlink
elif spouse:
title_str = '%s ' % slink
return title_str
def event_link(self, event_handle, event_title, gid=None, uplink=False):
"""
Creates a hyperlink for an event based on its type
@param: event_handle -- Event handle
@param: event_title -- Event title
@param: gid -- The gramps ID for the event
@param: uplink -- If True, then "../../../" is inserted in front
of the result.
"""
if not self.inc_events:
return event_title
url = self.report.build_url_fname_html(event_handle, "evt", uplink)
hyper = Html("a", event_title, href=url, title=event_title)
if not self.noid and gid:
hyper += Html("span", " [%s]" % gid, class_="grampsid", inline=True)
return hyper
def format_family_events(self, event_ref_list, place_lat_long):
"""
displays the event row for events such as marriage and divorce
@param: event_ref_list -- List of events reference
@param: place_lat_long -- For use in Family Map Pages. This will be None
if called from Family pages, which do not
create a Family Map
"""
with Html("table", class_="infolist eventlist") as table:
thead = Html("thead")
table += thead
# attach event header row
thead += self.event_header_row()
# begin table body
tbody = Html("tbody")
table += tbody
for evt_ref in event_ref_list:
event = self.r_db.get_event_from_handle(evt_ref.ref)
# add event body row
tbody += self.display_event_row(event, evt_ref, place_lat_long,
uplink=True, hyperlink=True,
omit=EventRoleType.FAMILY)
return table
def get_event_data(self, evt, evt_ref,
uplink, gid=None):
"""
retrieve event data from event and evt_ref
@param: evt -- Event from database
@param: evt_ref -- Event reference
@param: uplink -- If True, then "../../../" is inserted in front of
the result.
"""
place = None
place_handle = evt.get_place_handle()
if place_handle:
place = self.r_db.get_place_from_handle(place_handle)
place_hyper = None
if place:
place_name = _pd.display(self.r_db, place, evt.get_date_object())
place_hyper = self.place_link(place_handle, place_name,
uplink=uplink)
evt_desc = evt.get_description()
# wrap it all up and return to its callers
# position 0 = translatable label, position 1 = column class
# position 2 = data
return [(self._("Date"), "ColumnDate",
self.rlocale.get_date(evt.get_date_object())),
(self._("Place"), "ColumnPlace", place_hyper),
(self._("Description"), "ColumnDescription", evt_desc)]
def dump_ordinance(self, ldsobj, ldssealedtype):
"""
will dump the LDS Ordinance information for either
a person or a family ...
@param: ldsobj -- Either person or family
@param: ldssealedtype -- Either Sealed to Family or Spouse
"""
objectldsord = ldsobj.get_lds_ord_list()
if not objectldsord:
return None
# begin LDS ordinance table and table head
with Html("table", class_="infolist ldsordlist") as table:
thead = Html("thead")
table += thead
# begin HTML row
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [
[self._("Type"), "ColumnLDSType"],
[self._("Date"), "ColumnDate"],
[self._("Temple"), "ColumnLDSTemple"],
[self._("Place"), "ColumnLDSPlace"],
[self._("Status"), "ColumnLDSStatus"],
[self._("Sources"), "ColumnLDSSources"]
]
)
# start table body
tbody = Html("tbody")
table += tbody
for ordobj in objectldsord:
place_hyper = " "
place_handle = ordobj.get_place_handle()
if place_handle:
place = self.r_db.get_place_from_handle(place_handle)
if place:
place_title = _pd.display(self.r_db, place)
place_hyper = self.place_link(
place_handle, place_title,
place.get_gramps_id(), uplink=True)
# begin ordinance rows
trow = Html("tr")
trow.extend(
Html("td", value or " ", class_=colclass,
inline=(not value or colclass == "ColumnDate"))
for (value, colclass) in [
(ordobj.type2xml(), "ColumnType"),
(self.rlocale.get_date(ordobj.get_date_object()),
"ColumnDate"),
(ordobj.get_temple(), "ColumnLDSTemple"),
(place_hyper, "ColumnLDSPlace"),
(ordobj.get_status(), "ColumnLDSStatus"),
(self.get_citation_links(ordobj.get_citation_list()),
"ColumnSources")
]
)
tbody += trow
return table
def write_srcattr(self, srcattr_list):
"""
Writes out the srcattr for the different objects
@param: srcattr_list -- List of source attributes
"""
if len(srcattr_list) == 0:
return None
# begin data map division and section title...
with Html("div", class_="subsection", id="data_map") as section:
section += Html("h4", self._("Attributes"), inline=True)
with Html("table", class_="infolist") as table:
section += table
thead = Html("thead")
table += thead
trow = Html("tr") + (
Html("th", self._("Key"), class_="ColumnAttribute",
inline=True),
Html("th", self._("Value"), class_="ColumnValue",
inline=True)
)
thead += trow
tbody = Html("tbody")
table += tbody
for srcattr in srcattr_list:
trow = Html("tr") + (
Html("td", str(srcattr.get_type()),
class_="ColumnAttribute", inline=True),
Html("td", srcattr.get_value(),
class_="ColumnValue", inline=True)
)
tbody += trow
return section
def source_link(self, source_handle, source_title,
gid=None, cindex=None, uplink=False):
"""
Creates a link to the source object
@param: source_handle -- Source handle from database
@param: source_title -- Title from the source object
@param: gid -- Source gramps id from the source object
@param: cindex -- Count index
@param: uplink -- If True, then "../../../" is inserted in front
of the result.
"""
url = self.report.build_url_fname_html(source_handle, "src", uplink)
hyper = Html("a", source_title,
href=url,
title=source_title)
        # if a citation index was given, add an anchor name to the hyperlink
        if cindex:
hyper.attr += ' name ="sref%d"' % cindex
# add GRAMPS ID
if not self.noid and gid:
hyper += Html("span", ' [%s]' % gid, class_="grampsid", inline=True)
return hyper
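
    # Usage sketch for source_link; a non-None cindex also emits a
    # name="sref%d" anchor so inline citation markers can jump back here:
    #
    #     hyper = self.source_link(source.get_handle(), source.get_title(),
    #                              source.get_gramps_id(), cindex=1,
    #                              uplink=True)
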
def display_addr_list(self, addrlist, showsrc):
"""
Display a person's or repository's addresses ...
@param: addrlist -- a list of address handles
        @param: showsrc  -- True = show sources
                            False = do not show sources
                            None = Address Book address with sources
"""
if not addrlist:
return None
# begin addresses division and title
with Html("div", class_="subsection", id="Addresses") as section:
section += Html("h4", self._("Addresses"), inline=True)
# write out addresses()
section += self.dump_addresses(addrlist, showsrc)
# return address division to its caller
return section
def dump_addresses(self, addrlist, showsrc):
"""
will display an object's addresses, url list, note list,
and source references.
        @param: addrlist -- either person or repository address list
        @param: showsrc  -- True -- person and their sources
                            False -- repository with no sources
                            None -- Address Book address with sources
"""
if not addrlist:
return None
# begin summaryarea division
with Html("div", id="AddressTable") as summaryarea:
# begin address table
with Html("table") as table:
summaryarea += table
# get table class based on showsrc
                if showsrc is True:
                    table.attr = 'class = "infolist addrlist"'
                elif showsrc is False:
                    table.attr = 'class = "infolist repolist"'
                else:
                    table.attr = 'class = "infolist addressbook"'
# begin table head
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
addr_header = [
[self._("Date"), "Date"],
[self._("Street"), "StreetAddress"],
[self._("Locality"), "Locality"],
[self._("City"), "City"],
[self._("State/ Province"), "State"],
[self._("County"), "County"],
[self._("Postal Code"), "Postalcode"],
[self._("Country"), "Cntry"],
[self._("Phone"), "Phone"]]
# True, False, or None ** see docstring for explanation
if showsrc in [True, None]:
addr_header.append([self._("Sources"), "Sources"])
                trow.extend(
                    Html("th", self._(label),
                         class_="Column" + colclass, inline=True)
                    for (label, colclass) in addr_header
                )
# begin table body
tbody = Html("tbody")
table += tbody
# get address list from an object; either repository or person
for address in addrlist:
trow = Html("tr")
tbody += trow
addr_data_row = [
(self.rlocale.get_date(address.get_date_object()),
"ColumnDate"),
(address.get_street(), "ColumnStreetAddress"),
(address.get_locality(), "ColumnLocality"),
(address.get_city(), "ColumnCity"),
(address.get_state(), "ColumnState"),
(address.get_county(), "ColumnCounty"),
(address.get_postal_code(), "ColumnPostalCode"),
(address.get_country(), "ColumnCntry"),
(address.get_phone(), "ColumnPhone")
]
# get source citation list
if showsrc in [True, None]:
addr_data_row.append(
[self.get_citation_links(
address.get_citation_list()),
"ColumnSources"])
trow.extend(
Html("td", value or " ",
class_=colclass, inline=True)
for (value, colclass) in addr_data_row
)
# address: notelist
if showsrc is not None:
notelist = self.display_note_list(
address.get_note_list())
if notelist is not None:
summaryarea += notelist
return summaryarea
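
    # The showsrc tri-state selects both the table class and whether a
    # Sources column is emitted; a sketch of the three call forms
    # (illustrative arguments):
    #
    #     self.dump_addresses(person.get_address_list(), True)   # addrlist
    #     self.dump_addresses(repo.get_address_list(), False)    # repolist
    #     self.dump_addresses(addresses, None)                   # addressbook
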
def addressbook_link(self, person_handle, uplink=False):
"""
Creates a hyperlink for an address book link based on person's handle
@param: person_handle -- Person's handle from the database
@param: uplink -- If True, then "../../../" is inserted in front
of the result.
"""
url = self.report.build_url_fname_html(person_handle, "addr", uplink)
person = self.r_db.get_person_from_handle(person_handle)
person_name = self.get_name(person)
# return addressbook hyperlink to its caller
return Html("a", person_name, href=url, title=html_escape(person_name))
def get_copyright_license(self, copyright_, uplink=False):
"""
Will return either the text or image of the copyright license
@param: copyright_ -- The kind of copyright
@param: uplink -- If True, then "../../../" is inserted in front
of the result.
"""
text = ''
if copyright_ == 0:
if self.author:
year = Today().get_year()
text = '© %(year)d %(person)s' % {
'person' : self.author,
'year' : year}
elif 0 < copyright_ < len(_CC):
# Note. This is a URL
fname = "/".join(["images", "somerights20.gif"])
            url = self.report.build_url_fname(fname, None, uplink=uplink)
text = _CC[copyright_] % {'gif_fname' : url}
# return text or image to its callers
return text
def get_name(self, person, maiden_name=None):
""" I5118
Return person's name, unless maiden_name given, unless married_name
listed.
@param: person -- person object from database
@param: maiden_name -- Female's family surname
"""
# get name format for displaying names
name_format = self.report.options['name_format']
# Get all of a person's names
primary_name = person.get_primary_name()
married_name = None
names = [primary_name] + person.get_alternate_names()
for name in names:
if int(name.get_type()) == NameType.MARRIED:
married_name = name
break # use first
# Now, decide which to use:
if maiden_name is not None:
if married_name is not None:
name = Name(married_name)
else:
name = Name(primary_name)
surname_obj = name.get_primary_surname()
surname_obj.set_surname(maiden_name)
else:
name = Name(primary_name)
name.set_display_as(name_format)
return _nd.display_name(name)
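
    # A sketch of the maiden_name override: the surname is replaced only
    # when no married name is recorded for the person:
    #
    #     name = self.get_name(person)           # name as recorded
    #     name = self.get_name(person, "Smith")  # maiden surname forced,
    #                                            # unless a MARRIED name wins
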
def display_attribute_header(self):
"""
Display the attribute section and its table header
"""
# begin attributes division and section title
with Html("div", class_="subsection", id="attributes") as section:
section += Html("h4", self._("Attributes"), inline=True)
# begin attributes table
with Html("table", class_="infolist attrlist") as attrtable:
section += attrtable
thead = Html("thead")
attrtable += thead
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [
(self._("Type"), "ColumnType"),
(self._("Value"), "ColumnValue"),
(self._("Notes"), "ColumnNotes"),
(self._("Sources"), "ColumnSources")]
)
return section, attrtable
def display_attr_list(self, attrlist,
attrtable):
"""
Will display a list of attributes
@param: attrlist -- a list of attributes
@param: attrtable -- the table element that is being added to
"""
tbody = Html("tbody")
attrtable += tbody
tbody.extend(
self.dump_attribute(attr) for attr in attrlist
)
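
    # display_attribute_header and display_attr_list are meant to be used
    # as a pair; a hedged sketch of the calling pattern:
    #
    #     section, attrtable = self.display_attribute_header()
    #     self.display_attr_list(obj.get_attribute_list(), attrtable)
    #     body += section
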
def write_footer(self, date):
"""
Will create and display the footer section of each page...
@param: bottom -- whether to specify location of footer section or not?
"""
# begin footer division
with Html("div", id="footer") as footer:
footer_note = self.report.options['footernote']
if footer_note:
note = self.get_note_format(
self.r_db.get_note_from_gramps_id(footer_note),
False
)
user_footer = Html("div", id='user_footer')
footer += user_footer
# attach note
user_footer += note
msg = self._('Generated by %(gramps_home_html_start)s'
'Gramps%(html_end)s %(version)s'
) % {'gramps_home_html_start' :
'<a href="' + URL_HOMEPAGE + '">',
'html_end' : '</a>',
'version' : VERSION}
if date is not None:
msg += "<br />"
last_modif = datetime.datetime.fromtimestamp(date).strftime(
'%Y-%m-%d %H:%M:%S')
                msg += self._('Last change was on %(date)s') % {
                    'date' : last_modif}
else:
msg += self._(' on %(date)s') % {
'date' : self.rlocale.get_date(Today())}
origin1 = self.report.filter.get_name(self.rlocale)
filt_number = self.report.options['filter']
# optional "link-home" feature; see bug report #2736
if self.report.options['linkhome']:
center_person = self.r_db.get_person_from_gramps_id(
self.report.options['pid'])
if (center_person and
self.report.person_in_webreport(center_person.handle)):
center_person_url = self.report.build_url_fname_html(
center_person.handle, "ppl", self.uplink)
person_name = self.get_name(center_person)
                    if 0 < filt_number < 5:
subject_url = '<a href="' + center_person_url + '">'
subject_url += origin1 + '</a>'
else:
subject_url = origin1
msg += self._(
'%(http_break)sCreated for %(subject_url)s') % {
'http_break' : '<br />',
'subject_url' : subject_url}
else:
msg += self._(
'%(http_break)sCreated for %(subject_url)s') % {
'http_break' : '<br />',
'subject_url' : origin1}
# creation author
footer += Html("p", msg, id='createdate')
# get copyright license for all pages
copy_nr = self.report.copyright
text = ''
if copy_nr == 0:
if self.author:
year = Today().get_year()
text = '© %(year)d %(person)s' % {
'person' : self.author, 'year' : year}
elif copy_nr < len(_CC):
# Note. This is a URL
fname = "/".join(["images", "somerights20.gif"])
url = self.report.build_url_fname(fname, None, self.uplink)
text = _CC[copy_nr] % {'gif_fname' : url}
footer += Html("p", text, id='copyright')
# return footer to its callers
return footer
def write_header(self, title):
"""
Note. 'title' is used as currentsection in the navigation links and
as part of the header title.
@param: title -- Is the title of the web page
"""
# begin each html page...
xmllang = xml_lang()
page, head, body = Html.page('%s - %s' %
(html_escape(self.title_str.strip()),
html_escape(title)),
self.report.encoding,
xmllang, cms=self.usecms)
# temporary fix for .php parsing error
if self.ext in [".php", ".php3", ".cgi"]:
del page[0]
# Header constants
_meta1 = 'name ="viewport" content="width=device-width; '
_meta1 += 'height=device-height; initial-scale=0.1; '
_meta1 += 'maximum-scale=10.0; user-scalable=yes"'
_meta2 = 'name ="apple-mobile-web-app-capable" content="yes"'
_meta3 = 'name="generator" content="%s %s %s"' % (
PROGRAM_NAME, VERSION, URL_HOMEPAGE)
_meta4 = 'name="author" content="%s"' % self.author
# create additional meta tags
meta = Html("meta", attr=_meta1) + (
Html("meta", attr=_meta2, indent=False),
Html("meta", attr=_meta3, indent=False),
Html("meta", attr=_meta4, indent=False)
)
# Link to _NARRATIVESCREEN stylesheet
fname = "/".join(["css", _NARRATIVESCREEN])
url2 = self.report.build_url_fname(fname, None, self.uplink)
# Link to _NARRATIVEPRINT stylesheet
fname = "/".join(["css", _NARRATIVEPRINT])
url3 = self.report.build_url_fname(fname, None, self.uplink)
# Link to GRAMPS favicon
fname = "/".join(['images', 'favicon2.ico'])
url4 = self.report.build_url_image("favicon2.ico",
"images", self.uplink)
# create stylesheet and favicon links
links = Html("link", type="image/x-icon",
href=url4, rel="shortcut icon") + (
Html("link", type="text/css", href=url2,
media="screen", rel="stylesheet", indent=False),
Html("link", type="text/css", href=url3,
media='print', rel="stylesheet", indent=False)
)
# Link to Navigation Menus stylesheet
if CSS[self.report.css]["navigation"]:
fname = "/".join(["css", "narrative-menus.css"])
url = self.report.build_url_fname(fname, None, self.uplink)
links += Html("link", type="text/css", href=url,
media="screen", rel="stylesheet", indent=False)
# add additional meta and link tags
head += meta
head += links
# begin header section
headerdiv = Html("div", id='header') + (
Html("h1", html_escape(self.title_str), id="SiteTitle", inline=True)
)
body += headerdiv
header_note = self.report.options['headernote']
if header_note:
note = self.get_note_format(
self.r_db.get_note_from_gramps_id(header_note),
False)
user_header = Html("div", id='user_header')
headerdiv += user_header
# attach note
user_header += note
# Begin Navigation Menu--
# is the style sheet either Basic-Blue or Visually Impaired,
# and menu layout is Drop Down?
if (self.report.css == _("Basic-Blue") or
self.report.css == _("Visually Impaired")
) and self.report.navigation == "dropdown":
body += self.display_drop_menu()
else:
body += self.display_nav_links(title)
# return page, head, and body to its classes...
return page, head, body
def display_nav_links(self, currentsection):
"""
Creates the navigation menu
        @param: currentsection -- The menu item for the page being generated
"""
# include repositories or not?
inc_repos = True
if (not self.report.inc_repository or
not len(self.r_db.get_repository_handles())):
inc_repos = False
# create media pages...
        _create_media_link = self.create_media and not self.create_thumbs_only
# create link to web calendar pages...
_create_calendar_link = False
if self.usecal:
_create_calendar_link = True
self.target_cal_uri += "/index"
        # Determine which menu items will be available.
        # Menu items have been adjusted to coincide with Gramps Navigation
        # Sidebar order...
navs = [
(self.report.index_fname, self._("Html|Home"),
self.report.use_home),
(self.report.intro_fname, self._("Introduction"),
self.report.use_intro),
('individuals', self._("Individuals"), True),
(self.report.surname_fname, self._("Surnames"), True),
('families', self._("Families"), self.report.inc_families),
('events', self._("Events"), self.report.inc_events),
('places', self._("Places"), True),
('sources', self._("Sources"), True),
('repositories', self._("Repositories"), inc_repos),
('media', self._("Media"), _create_media_link),
('thumbnails', self._("Thumbnails"), self.create_media),
('download', self._("Download"), self.report.inc_download),
("addressbook", self._("Address Book"),
self.report.inc_addressbook),
('contact', self._("Contact"), self.report.use_contact),
('statistics', self._("Statistics"), True),
(self.target_cal_uri, self._("Web Calendar"), self.usecal)
]
        # Remove menu sections that are not being created
        menu_items = [(url_text, nav_text)
                      for url_text, nav_text, cond in navs if cond]
number_items = len(menu_items)
num_cols = 10
num_rows = ((number_items // num_cols) + 1)
# begin navigation menu division...
with Html("div", class_="wrapper",
id="nav", role="navigation") as navigation:
with Html("div", class_="container") as container:
index = 0
for rows in range(num_rows):
unordered = Html("ul", class_="menu", id="dropmenu")
cols = 0
while cols <= num_cols and index < number_items:
url_fname, nav_text = menu_items[index]
hyper = self.get_nav_menu_hyperlink(url_fname, nav_text)
# Define 'currentsection' to correctly set navlink item
# CSS id 'CurrentSection' for Navigation styling.
# Use 'self.report.cur_fname' to determine
# 'CurrentSection' for individual elements for
# Navigation styling.
# Figure out if we need <li class = "CurrentSection">
# or just <li>
check_cs = False
if nav_text == currentsection:
check_cs = True
elif nav_text == _("Surnames"):
if "srn" in self.report.cur_fname:
check_cs = True
elif _("Surnames") in currentsection:
check_cs = True
elif nav_text == _("Individuals"):
if "ppl" in self.report.cur_fname:
check_cs = True
elif nav_text == _("Families"):
if "fam" in self.report.cur_fname:
check_cs = True
elif nav_text == _("Sources"):
if "src" in self.report.cur_fname:
check_cs = True
elif nav_text == _("Places"):
if "plc" in self.report.cur_fname:
check_cs = True
elif nav_text == _("Events"):
if "evt" in self.report.cur_fname:
check_cs = True
elif nav_text == _("Media"):
if "img" in self.report.cur_fname:
check_cs = True
elif nav_text == _("Address Book"):
if "addr" in self.report.cur_fname:
check_cs = True
temp_cs = 'class = "CurrentSection"'
check_cs = temp_cs if check_cs else False
if check_cs:
unordered.extend(
Html("li", hyper, attr=check_cs, inline=True)
)
else:
unordered.extend(
Html("li", hyper, inline=True)
)
index += 1
cols += 1
if rows == num_rows - 1:
prv = Html('<a onclick="history.go(-1);">%s</a>' %
self._("Previous"))
nxt = Html('<a onclick="history.go(+1);">%s</a>' %
self._("Next"))
unordered.extend(Html("li", prv, inline=True))
unordered.extend(Html("li", nxt, inline=True))
container += unordered
navigation += container
return navigation
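
    # The navs table above is filtered on its boolean third element before
    # rendering; the same pattern recurs in display_drop_menu below:
    #
    #     navs = [("places", self._("Places"), True),
    #             ("media", self._("Media"), _create_media_link)]
    #     menu_items = [(url, text) for url, text, cond in navs if cond]
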
def display_drop_menu(self):
"""
Creates the Drop Down Navigation Menu
"""
# include repositories or not?
inc_repos = True
if (not self.report.inc_repository or
not len(self.r_db.get_repository_handles())):
inc_repos = False
# create media pages...
        _create_media_link = self.create_media and not self.create_thumbs_only
personal = [
(self.report.intro_fname, self._("Introduction"),
self.report.use_intro),
("individuals", self._("Individuals"), True),
(self.report.surname_fname, self._("Surnames"), True),
("families", self._("Families"), self.report.inc_families)
]
        personal = [(url_text, nav_text)
                    for url_text, nav_text, cond in personal if cond]
navs1 = [
("events", self._("Events"), self.report.inc_events),
("places", self._("Places"), True),
("sources", self._("Sources"), True),
("repositories", self._("Repositories"), inc_repos)
]
        navs1 = [(url_text, nav_text)
                 for url_text, nav_text, cond in navs1 if cond]
media = [
("media", self._("Media"), _create_media_link),
("thumbnails", self._("Thumbnails"), True)
]
        media = [(url_text, nav_text)
                 for url_text, nav_text, cond in media if cond]
misc = [
('download', self._("Download"), self.report.inc_download),
("addressbook", self._("Address Book"), self.report.inc_addressbook)
]
        misc = [(url_text, nav_text)
                for url_text, nav_text, cond in misc if cond]
contact = [
('contact', self._("Contact"), self.report.use_contact)
]
        contact = [(url_text, nav_text)
                   for url_text, nav_text, cond in contact if cond]
# begin navigation menu division...
with Html("div", class_="wrapper",
id="nav", role="navigation") as navigation:
with Html("div", class_="container") as container:
unordered = Html("ul", class_="menu", id="dropmenu")
if self.report.use_home:
list_html = Html("li",
self.get_nav_menu_hyperlink(
self.report.index_fname,
self._("Html|Home")))
unordered += list_html
# add personal column
self.get_column_data(unordered, personal, self._("Personal"))
if len(navs1):
for url_fname, nav_text in navs1:
unordered.extend(
Html("li", self.get_nav_menu_hyperlink(url_fname,
nav_text),
inline=True)
)
# add media column
self.get_column_data(unordered, media, self._("Media"))
# add miscellaneous column
self.get_column_data(unordered, misc, self._("Miscellaneous"))
# add contact column
self.get_column_data(unordered, contact, _("Contact"))
container += unordered
navigation += container
return navigation
def add_image(self, option_name, height=0):
"""
Will add an image (if present) to the page
@param: option_name -- The name of the report option
@param: height -- Height of the image
"""
pic_id = self.report.options[option_name]
if pic_id:
obj = self.r_db.get_media_from_gramps_id(pic_id)
if obj is None:
return None
mime_type = obj.get_mime_type()
if mime_type and mime_type.startswith("image"):
try:
newpath, thumb_path = self.report.prepare_copy_media(obj)
self.report.copy_file(media_path_full(
self.r_db, obj.get_path()), newpath)
# begin image
image = Html("img")
image.attr = ''
if height:
image.attr += 'height = "%d"' % height
descr = html_escape(obj.get_description())
newpath = self.report.build_url_fname(newpath)
image.attr += ' src = "%s" alt = "%s"' % (newpath, descr)
# return an image
return image
except (IOError, OSError) as msg:
self.r_user.warn(_("Could not add photo to page"),
str(msg))
# no image to return
return None
def media_ref_rect_regions(self, handle):
"""
GRAMPS feature #2634 -- attempt to highlight subregions in media
objects and link back to the relevant web page.
This next section of code builds up the "records" we'll need to
generate the html/css code to support the subregions
@param: handle -- The media handle to use
"""
# get all of the backlinks to this media object; meaning all of
# the people, events, places, etc..., that use this image
_region_items = set()
for (classname, newhandle) in self.r_db.find_backlink_handles(
handle,
include_classes=["Person", "Family", "Event", "Place"]):
# for each of the backlinks, get the relevant object from the db
# and determine a few important things, such as a text name we
# can use, and the URL to a relevant web page
_obj = None
_name = ""
_linkurl = "#"
if classname == "Person":
# Is this a person for whom we have built a page:
if self.report.person_in_webreport(newhandle):
# If so, let's add a link to them:
_obj = self.r_db.get_person_from_handle(newhandle)
if _obj:
# What is the shortest possible name we could use
# for this person?
_name = (_obj.get_primary_name().get_call_name() or
_obj.get_primary_name().get_first_name() or
self._("Unknown")
)
_linkurl = self.report.build_url_fname_html(_obj.handle,
"ppl", True)
elif classname == "Family":
_obj = self.r_db.get_family_from_handle(newhandle)
partner1_handle = _obj.get_father_handle()
partner2_handle = _obj.get_mother_handle()
partner1 = None
partner2 = None
if partner1_handle:
partner1 = self.r_db.get_person_from_handle(
partner1_handle)
if partner2_handle:
partner2 = self.r_db.get_person_from_handle(
partner2_handle)
if partner2 and partner1:
_name = partner1.get_primary_name().get_first_name()
_linkurl = self.report.build_url_fname_html(partner1_handle,
"ppl", True)
elif partner1:
_name = partner1.get_primary_name().get_first_name()
_linkurl = self.report.build_url_fname_html(partner1_handle,
"ppl", True)
elif partner2:
_name = partner2.get_primary_name().get_first_name()
_linkurl = self.report.build_url_fname_html(partner2_handle,
"ppl", True)
if not _name:
_name = self._("Unknown")
elif classname == "Event":
_obj = self.r_db.get_event_from_handle(newhandle)
_name = _obj.get_description()
if not _name:
_name = self._("Unknown")
_linkurl = self.report.build_url_fname_html(_obj.handle,
"evt", True)
elif classname == "Place":
_obj = self.r_db.get_place_from_handle(newhandle)
_name = _pd.display(self.r_db, _obj)
if not _name:
_name = self._("Unknown")
_linkurl = self.report.build_url_fname_html(newhandle,
"plc", True)
            # no usable object was found for this backlink; skip it
if _obj is None:
continue
# get a list of all media refs for this object
media_list = _obj.get_media_list()
            # go through media refs looking for one that points to this image
for mediaref in media_list:
# is this mediaref for this image? do we have a rect?
if mediaref.ref == handle and mediaref.rect is not None:
(coord_x1, coord_y1, coord_x2, coord_y2) = mediaref.rect
# Gramps gives us absolute coordinates,
# but we need relative width + height
width = coord_x2 - coord_x1
height = coord_y2 - coord_y1
                    # remember all this information, because we'll need it
                    # later when we output the <li>...</li> tags
item = (_name, coord_x1, coord_y1, width, height, _linkurl)
_region_items.add(item)
# End of code that looks for and prepares the media object regions
# return media rectangles to its callers
# bug 8950 : it seems it's better to sort on name
# + coords of the rectangle.
def sort_by_name_and_rectangle(obj):
"""
Sort by name and rectangle
@param: obj -- The object reference
"""
            return (obj[0], obj[1], obj[2], obj[3], obj[4])
        return sorted(_region_items, key=sort_by_name_and_rectangle)
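
    # Each item returned by media_ref_rect_regions is a 6-tuple of
    # (name, x1, y1, width, height, linkurl); a sketch of a consumer:
    #
    #     for (name, x, y, width, height, url) in \
    #             self.media_ref_rect_regions(handle):
    #         regions += Html("li", Html("a", name, href=url),
    #                         style="left:%d%%; top:%d%%;" % (x, y))
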
def media_ref_region_to_object(self, media_handle, obj):
"""
Return a region of this image if it refers to this object.
@param: media_handle -- The media handle to use
@param: obj -- The object reference
"""
# get a list of all media refs for this object
for mediaref in obj.get_media_list():
# is this mediaref for this image? do we have a rect?
if (mediaref.ref == media_handle and
mediaref.rect is not None):
return mediaref.rect # (x1, y1, x2, y2)
return None
def disp_first_img_as_thumbnail(self, photolist, object_):
"""
Return the Html of the first image of photolist that is
associated with object. First image might be a region in an
image. Or, the first image might have regions defined in it.
@param: photolist -- The list of media
@param: object_ -- The object reference
"""
if not photolist or not self.create_media:
return None
photo_handle = photolist[0].get_reference_handle()
photo = self.r_db.get_media_from_handle(photo_handle)
mime_type = photo.get_mime_type()
descr = photo.get_description()
# begin snapshot division
with Html("div", class_="snapshot") as snapshot:
if mime_type:
region = self.media_ref_region_to_object(photo_handle, object_)
if region:
# make a thumbnail of this region
newpath = self.copy_thumbnail(photo_handle, photo, region)
newpath = self.report.build_url_fname(newpath, uplink=True)
snapshot += self.media_link(photo_handle, newpath, descr,
uplink=self.uplink,
usedescr=False)
else:
real_path, newpath = self.report.prepare_copy_media(photo)
newpath = self.report.build_url_fname(newpath, uplink=True)
# FIXME: There doesn't seem to be any point in highlighting
# a sub-region in the thumbnail and linking back to the
# person or whatever. First it is confusing when the link
# probably has nothing to do with the page on which the
# thumbnail is displayed, and second on a thumbnail it is
# probably too small to see, and third, on the thumbnail,
# the link is shown above the image (which is pretty
# useless!)
_region_items = self.media_ref_rect_regions(photo_handle)
if len(_region_items):
with Html("div", id="GalleryDisplay") as mediadisplay:
snapshot += mediadisplay
ordered = Html("ol", class_="RegionBox")
mediadisplay += ordered
while len(_region_items):
(name, coord_x, coord_y,
width, height, linkurl) = _region_items.pop()
ordered += Html("li",
style="left:%d%%; top:%d%%; "
"width:%d%%; height:%d%%;" % (
coord_x, coord_y,
width, height))
ordered += Html("a", name, href=linkurl)
# Need to add link to mediadisplay to get the links:
mediadisplay += self.media_link(photo_handle,
newpath, descr,
self.uplink, False)
else:
try:
# Begin hyperlink. Description is given only for
# the purpose of the alt tag in img element
snapshot += self.media_link(photo_handle, newpath,
descr,
uplink=self.uplink,
usedescr=False)
except (IOError, OSError) as msg:
self.r_user.warn(_("Could not add photo to page"),
str(msg))
else:
# begin hyperlink
snapshot += self.doc_link(photo_handle, descr,
uplink=self.uplink, usedescr=False)
# return snapshot division to its callers
return snapshot
def disp_add_img_as_gallery(self, photolist, object_):
"""
Display additional image as gallery
@param: photolist -- The list of media
@param: object_ -- The object reference
"""
if not photolist or not self.create_media:
return None
# make referenced images have the same order as in media list:
photolist_handles = {}
for mediaref in photolist:
photolist_handles[mediaref.get_reference_handle()] = mediaref
photolist_ordered = []
for photoref in copy.copy(object_.get_media_list()):
if photoref.ref in photolist_handles:
photo = photolist_handles[photoref.ref]
photolist_ordered.append(photo)
try:
photolist.remove(photo)
except ValueError:
LOG.warning("Error trying to remove '%s' from photolist",
photo)
# and add any that are left (should there be any?)
photolist_ordered += photolist
# begin individualgallery division and section title
with Html("div", class_="subsection", id="indivgallery") as section:
section += Html("h4", self._("Media"), inline=True)
displayed = []
for mediaref in photolist_ordered:
photo_handle = mediaref.get_reference_handle()
photo = self.r_db.get_media_from_handle(photo_handle)
if photo_handle in displayed:
continue
mime_type = photo.get_mime_type()
# get media description
descr = photo.get_description()
if mime_type:
try:
# create thumbnail url
# extension needs to be added as it is not already there
url = self.report.build_url_fname(photo_handle, "thumb",
True) + ".png"
# begin hyperlink
section += self.media_link(photo_handle, url,
descr, uplink=self.uplink,
usedescr=True)
except (IOError, OSError) as msg:
self.r_user.warn(_("Could not add photo to page"),
str(msg))
else:
try:
# begin hyperlink
section += self.doc_link(photo_handle, descr,
uplink=self.uplink)
except (IOError, OSError) as msg:
self.r_user.warn(_("Could not add photo to page"),
str(msg))
displayed.append(photo_handle)
# add fullclear for proper styling
section += FULLCLEAR
# return indivgallery division to its caller
return section
def display_note_list(self, notelist=None):
"""
Display note list
@param: notelist -- The list of notes
"""
if not notelist:
return None
# begin narrative division
with Html("div", class_="subsection narrative") as section:
for notehandle in notelist:
note = self.r_db.get_note_from_handle(notehandle)
if note:
note_text = self.get_note_format(note, True)
# add section title
section += Html("h4", self._("Narrative"), inline=True)
# attach note
section += note_text
# return notes to its callers
return section
def display_url_list(self, urllist=None):
"""
Display URL list
@param: urllist -- The list of urls
"""
if not urllist:
return None
# begin web links division
with Html("div", class_="subsection", id="WebLinks") as section:
section += Html("h4", self._("Web Links"), inline=True)
with Html("table", class_="infolist weblinks") as table:
section += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow.extend(
Html('th', label, class_=colclass, inline=True)
for (label, colclass) in [
(self._("Type"), "ColumnType"),
(self._("Description"), "ColumnDescription")
]
)
tbody = Html("tbody")
table += tbody
for url in urllist:
trow = Html("tr")
tbody += trow
                    # keep the raw UrlType for comparison; _type holds the
                    # translated label shown in the table below
                    url_type = url.get_type()
                    _type = self._(url_type.xml_str())
                    uri = url.get_path()
                    descr = url.get_description()
                    # Email address
                    if url_type == UrlType.EMAIL:
                        if not uri.startswith("mailto:"):
                            uri = "mailto:%(email)s" % {'email' : uri}
                    # Web Site address
                    elif url_type == UrlType.WEB_HOME:
                        if not (uri.startswith("http://") or
                                uri.startswith("https://")):
                            uri = "http://%(website)s" % {"website" : uri}
                    # FTP server address
                    elif url_type == UrlType.WEB_FTP:
                        if not (uri.startswith("ftp://") or
                                uri.startswith("ftps://")):
                            uri = "ftp://%(ftpsite)s" % {"ftpsite" : uri}
descr = Html("p", html_escape(descr)) + (
Html("a", self._(" [Click to Go]"), href=uri, title=uri)
)
trow.extend(
Html("td", data, class_=colclass, inline=True)
for (data, colclass) in [
(str(_type), "ColumnType"),
(descr, "ColumnDescription")
]
)
return section
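
    # The scheme normalization above only prepends a default scheme when
    # the stored path lacks one; a sketch of the mapping (illustrative
    # values):
    #
    #     "user@example.com" -> "mailto:user@example.com"  (EMAIL)
    #     "example.com"      -> "http://example.com"       (WEB_HOME)
    #     "ftp.example.com"  -> "ftp://ftp.example.com"    (WEB_FTP)
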
def display_lds_ordinance(self, db_obj_):
"""
Display LDS information for a person or family
@param: db_obj_ -- The database object
"""
ldsordlist = db_obj_.lds_ord_list
if not ldsordlist:
return None
# begin LDS Ordinance division and section title
with Html("div", class_="subsection", id="LDSOrdinance") as section:
section += Html("h4", _("Latter-Day Saints/ LDS Ordinance"),
inline=True)
            # dump individual LDS ordinance list
section += self.dump_ordinance(db_obj_, "Person")
# return section to its caller
return section
def display_ind_sources(self, srcobj):
"""
Will create the "Source References" section for an object
@param: srcobj -- Sources object
"""
        for chandle in srcobj.get_citation_list():
            self.bibli.add_reference(
                self.r_db.get_citation_from_handle(chandle))
sourcerefs = self.display_source_refs(self.bibli)
# return to its callers
return sourcerefs
# Only used in IndividualPage.display_ind_sources(),
# and MediaPage.display_media_sources()
def display_source_refs(self, bibli):
"""
Display source references
@param: bibli -- List of sources
"""
if bibli.get_citation_count() == 0:
return None
with Html("div", class_="subsection", id="sourcerefs") as section:
section += Html("h4", self._("Source References"), inline=True)
ordered = Html("ol")
cindex = 0
citationlist = bibli.get_citation_list()
for citation in citationlist:
cindex += 1
# Add this source and its references to the page
source = self.r_db.get_source_from_handle(
citation.get_source_handle())
if source is not None:
if source.get_author():
authorstring = source.get_author() + ": "
else:
authorstring = ""
list_html = Html("li",
self.source_link(
source.get_handle(),
authorstring + source.get_title(),
source.get_gramps_id(), cindex,
uplink=self.uplink))
else:
list_html = Html("li", "None")
ordered1 = Html("ol")
citation_ref_list = citation.get_ref_list()
for key, sref in citation_ref_list:
cit_ref_li = Html("li", id="sref%d%s" % (cindex, key))
tmp = Html("ul")
conf = conf_strings.get(sref.confidence, self._('Unknown'))
if conf == conf_strings[Citation.CONF_NORMAL]:
conf = None
else:
conf = _(conf)
for (label, data) in [[self._("Date"),
self.rlocale.get_date(sref.date)],
[self._("Page"), sref.page],
[self._("Confidence"), conf]]:
if data:
tmp += Html("li", "%s: %s" % (label, data))
if self.create_media:
for media_ref in sref.get_media_list():
media_handle = media_ref.get_reference_handle()
media = self.r_db.get_media_from_handle(
media_handle)
if media:
mime_type = media.get_mime_type()
if mime_type:
if mime_type.startswith("image/"):
real_path, newpath = \
self.report.prepare_copy_media(
media)
newpath = self.report.build_url_fname(
newpath, uplink=self.uplink)
dest_dir = os.path.dirname(
self.report.cur_fname)
if dest_dir:
newpath = os.path.join(dest_dir,
newpath)
self.report.copy_file(
media_path_full(self.r_db,
media.get_path()),
newpath)
tmp += Html("li",
self.media_link(
media_handle,
newpath,
media.get_description(),
self.uplink,
usedescr=False),
inline=True)
else:
tmp += Html("li",
self.doc_link(
media_handle,
media.get_description(),
self.uplink,
usedescr=False),
inline=True)
for handle in sref.get_note_list():
this_note = self.r_db.get_note_from_handle(handle)
if this_note is not None:
tmp += Html("li",
"%s: %s" % (
str(this_note.get_type()),
self.get_note_format(this_note,
True)
))
if tmp:
cit_ref_li += tmp
ordered1 += cit_ref_li
if citation_ref_list:
list_html += ordered1
ordered += list_html
section += ordered
# return section to its caller
return section
def display_references(self, handlelist,
uplink=False):
"""
Display references for the current objects
@param: handlelist -- List of handles
@param: uplink -- If True, then "../../../" is inserted in front of
the result.
"""
if not handlelist:
return None
# begin references division and title
with Html("div", class_="subsection", id="references") as section:
section += Html("h4", self._("References"), inline=True)
ordered = Html("ol")
section += ordered
sortlist = sorted(handlelist,
key=lambda x: self.rlocale.sort_key(x[1]))
for (path, name, gid) in sortlist:
list_html = Html("li")
ordered += list_html
name = name or self._("Unknown")
if not self.noid and gid != "":
gid_html = Html("span", " [%s]" % gid, class_="grampsid",
inline=True)
else:
gid_html = ""
if path != "":
url = self.report.build_url_fname(path, None, self.uplink)
list_html += Html("a", href=url) + name + gid_html
else:
list_html += name + str(gid_html)
# return references division to its caller
return section
def family_map_link(self, handle, url):
"""
Creates a link to the family map
@param: handle -- The family handle
@param: url -- url to be linked
"""
return Html("a", self._("Family Map"), href=url,
title=self._("Family Map"), class_="familymap", inline=True)
def display_spouse(self, partner, family, place_lat_long):
"""
Display an individual's partner
@param: partner -- The partner
@param: family -- The family
@param: place_lat_long -- For use in Family Map Pages. This will be None
if called from Family pages, which do not
create a Family Map
"""
gender = partner.get_gender()
        reltype = family.get_relationship()
        rtype = self._(reltype.xml_str())
if reltype == FamilyRelType.MARRIED:
if gender == Person.FEMALE:
relstr = self._("Wife")
elif gender == Person.MALE:
relstr = self._("Husband")
else:
relstr = self._("Partner")
else:
relstr = self._("Partner")
# display family relationship status, and add spouse to FamilyMapPages
if self.familymappages:
self._get_event_place(partner, place_lat_long)
trow = Html("tr", class_="BeginFamily") + (
Html("td", rtype, class_="ColumnType", inline=True),
Html("td", relstr, class_="ColumnAttribute", inline=True)
)
tcell = Html("td", class_="ColumnValue")
trow += tcell
tcell += self.new_person_link(partner.get_handle(), uplink=True,
person=partner)
return trow
def display_child_link(self, chandle):
"""
display child link ...
@param: chandle -- Child handle
"""
return self.new_person_link(chandle, uplink=True)
def new_person_link(self, person_handle, uplink=False, person=None,
name_style=_NAME_STYLE_DEFAULT):
"""
        creates a link for a person. If a page is generated for the person, a
        hyperlink is created, else just the name of the person. The returned
        value will be an Html object if a hyperlink is generated, otherwise
        just a string
@param: person_handle -- Person in database
@param: uplink -- If True, then "../../../" is inserted in front
of the result
@param: person -- Person object. This does not need to be passed.
It should be passed if the person object has
already been retrieved, as it will be used to
improve performance
"""
result = self.report.obj_dict.get(Person).get(person_handle)
# construct link, name and gid
if result is None:
# The person is not included in the webreport
link = ""
if person is None:
person = self.r_db.get_person_from_handle(person_handle)
if person:
name = self.report.get_person_name(person)
gid = person.get_gramps_id()
else:
name = _("Unknown")
gid = ""
else:
# The person has been encountered in the web report, but this does
# not necessarily mean that a page has been generated
(link, name, gid) = result
if name_style == _NAME_STYLE_FIRST and person:
name = _get_short_name(person.get_gender(),
person.get_primary_name())
name = html_escape(name)
# construct the result
if not self.noid and gid != "":
gid_html = Html("span", " [%s]" % gid, class_="grampsid",
inline=True)
else:
gid_html = ""
if link != "":
url = self.report.build_url_fname(link, uplink=uplink)
hyper = Html("a", name, gid_html, href=url, inline=True)
else:
hyper = name + str(gid_html)
return hyper
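
    # Usage sketch for new_person_link; passing the already-fetched person
    # object avoids a redundant database read:
    #
    #     link = self.new_person_link(handle, uplink=True, person=person)
    #     # -> an Html anchor when a page was generated for the person,
    #     #    otherwise "Name [gid]" as a plain string
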
def media_link(self, media_handle, img_url, name,
uplink=False, usedescr=True):
"""
creates and returns a hyperlink to the thumbnail image
@param: media_handle -- Photo handle from report database
@param: img_url -- Thumbnail url
@param: name -- Photo description
@param: uplink -- If True, then "../../../" is inserted in front
of the result.
@param: usedescr -- Add media description
"""
url = self.report.build_url_fname_html(media_handle, "img", uplink)
name = html_escape(name)
# begin thumbnail division
with Html("div", class_="thumbnail") as thumbnail:
# begin hyperlink
if not self.create_thumbs_only:
hyper = Html("a", href=url, title=name) + (
Html("img", src=img_url, alt=name)
)
else:
hyper = Html("img", src=img_url, alt=name)
thumbnail += hyper
if usedescr:
hyper += Html("p", name, inline=True)
return thumbnail
def doc_link(self, handle, name, uplink=False, usedescr=True):
"""
create a hyperlink for the media object and returns it
@param: handle -- Document handle
@param: name -- Document name
@param: uplink -- If True, then "../../../" is inserted in front of
the result.
@param: usedescr -- Add description to hyperlink
"""
url = self.report.build_url_fname_html(handle, "img", uplink)
name = html_escape(name)
# begin thumbnail division
with Html("div", class_="thumbnail") as thumbnail:
document_url = self.report.build_url_image("document.png",
"images", uplink)
if not self.create_thumbs_only:
document_link = Html("a", href=url, title=name) + (
Html("img", src=document_url, alt=name)
)
else:
document_link = Html("img", src=document_url, alt=name)
if usedescr:
document_link += Html('br') + (
Html("span", name, inline=True)
)
thumbnail += document_link
return thumbnail
def place_link(self, handle, name, gid=None, uplink=False):
"""
Returns a hyperlink for place link
@param: handle -- repository handle from report database
@param: name -- repository title
@param: gid -- gramps id
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
"""
url = self.report.build_url_fname_html(handle, "plc", uplink)
hyper = Html("a", html_escape(name), href=url, title=html_escape(name))
if not self.noid and gid:
hyper += Html("span", " [%s]" % gid, class_="grampsid", inline=True)
# return hyperlink to its callers
return hyper
def dump_place(self, place, table):
"""
Dump a place's information from within the database
@param: place -- Place object from the database
@param: table -- Table from Placedetail
"""
if place in self.report.visited:
return
self.report.visited.append(place)
# add table body
tbody = Html("tbody")
table += tbody
gid = place.gramps_id
if not self.noid and gid:
trow = Html("tr") + (
Html("td", self._("Gramps ID"), class_="ColumnAttribute",
inline=True),
Html("td", gid, class_="ColumnValue", inline=True)
)
tbody += trow
data = place.get_latitude()
if data != "":
trow = Html('tr') + (
Html("td", self._("Latitude"), class_="ColumnAttribute",
inline=True),
Html("td", data, class_="ColumnValue", inline=True)
)
tbody += trow
data = place.get_longitude()
if data != "":
trow = Html('tr') + (
Html("td", self._("Longitude"), class_="ColumnAttribute",
inline=True),
Html("td", data, class_="ColumnValue", inline=True)
)
tbody += trow
mlocation = get_main_location(self.r_db, place)
for (label, data) in [
(self._("Street"), mlocation.get(PlaceType.STREET, '')),
(self._("Locality"), mlocation.get(PlaceType.LOCALITY, '')),
(self._("City"), mlocation.get(PlaceType.CITY, '')),
(self._("Church Parish"), mlocation.get(PlaceType.PARISH, '')),
(self._("County"), mlocation.get(PlaceType.COUNTY, '')),
(self._("State/ Province"), mlocation.get(PlaceType.STATE, '')),
(self._("Postal Code"), place.get_code()),
(self._("Country"), mlocation.get(PlaceType.COUNTRY, ''))]:
if data:
trow = Html("tr") + (
Html("td", label, class_="ColumnAttribute", inline=True),
Html("td", data, class_="ColumnValue", inline=True)
)
tbody += trow
altloc = place.get_alternate_locations()
if altloc:
tbody += Html("tr") + Html("td", " ", colspan=2)
trow = Html("tr") + (
Html("th", self._("Alternate Locations"), colspan=2,
class_="ColumnAttribute", inline=True),
)
tbody += trow
for loc in (nonempt
for nonempt in altloc if not nonempt.is_empty()):
for (label, data) in [
(self._("Street"), loc.street),
(self._("Locality"), loc.locality),
(self._("City"), loc.city),
(self._("Church Parish"), loc.parish),
(self._("County"), loc.county),
(self._("State/ Province"), loc.state),
(self._("Postal Code"), loc.postal),
(self._("Country"), loc.country),]:
if data:
trow = Html("tr") + (
Html("td", label, class_="ColumnAttribute",
inline=True),
Html("td", data, class_="ColumnValue", inline=True)
)
tbody += trow
tbody += Html("tr") + Html("td", " ", colspan=2)
# display all related locations
for placeref in place.get_placeref_list():
place_date = get_date(placeref)
if place_date != "":
parent_place = self.r_db.get_place_from_handle(placeref.ref)
parent_name = parent_place.get_name().get_value()
trow = Html('tr') + (
Html("td", self._("Locations"), class_="ColumnAttribute",
inline=True),
Html("td", parent_name, class_="ColumnValue", inline=True),
Html("td", place_date, class_="ColumnValue", inline=True)
)
tbody += trow
# return place table to its callers
return table
def repository_link(self, repository_handle, name, gid=None, uplink=False):
"""
Returns a hyperlink for repository links
@param: repository_handle -- repository handle from report database
@param: name -- repository title
@param: gid -- gramps id
@param: uplink -- If True, then "../../../" is inserted in
front of the result.
"""
url = self.report.build_url_fname_html(repository_handle,
'repo', uplink)
name = html_escape(name)
hyper = Html("a", name, href=url, title=name)
if not self.noid and gid:
hyper += Html("span", '[%s]' % gid, class_="grampsid", inline=True)
return hyper
def dump_repository_ref_list(self, repo_ref_list):
"""
Dumps the repository
@param: repo_ref_list -- The list of repositories references
"""
if len(repo_ref_list) == 0:
return None
# Repository list division...
with Html("div", class_="subsection",
id="repositories") as repositories:
repositories += Html("h4", self._("Repositories"), inline=True)
with Html("table", class_="infolist") as table:
repositories += table
thead = Html("thead")
table += thead
trow = Html("tr") + (
Html("th", self._("Number"), class_="ColumnRowLabel",
inline=True),
Html("th", self._("Title"), class_="ColumnName",
inline=True),
Html("th", self._("Type"), class_="ColumnName",
inline=True),
Html("th", self._("Call number"), class_="ColumnName",
inline=True)
)
thead += trow
tbody = Html("tbody")
table += tbody
index = 1
for repo_ref in repo_ref_list:
repo = self.r_db.get_repository_from_handle(repo_ref.ref)
if repo:
trow = Html("tr") + (
Html("td", index, class_="ColumnRowLabel",
inline=True),
Html("td",
self.repository_link(repo_ref.ref,
repo.get_name(),
repo.get_gramps_id(),
self.uplink)),
Html("td",
self._(repo_ref.get_media_type().xml_str()),
class_="ColumnName"),
Html("td", repo_ref.get_call_number(),
class_="ColumnName")
)
tbody += trow
index += 1
return repositories
def dump_residence(self, has_res):
"""
Creates a residence from the database
@param: has_res -- The residence to use
"""
if not has_res:
return None
# begin residence division
with Html("div", class_="content Residence") as residence:
residence += Html("h4", self._("Residence"), inline=True)
with Html("table", class_="infolist place") as table:
residence += table
place_handle = has_res.get_place_handle()
if place_handle:
place = self.r_db.get_place_from_handle(place_handle)
if place:
self.dump_place(place, table)
descr = has_res.get_description()
if descr:
trow = Html("tr")
if len(table) == 3:
# append description row to tbody element of dump_place
table[-2] += trow
else:
# append description row to table element
table += trow
trow.extend(Html("td", self._("Description"),
class_="ColumnAttribute", inline=True))
trow.extend(Html("td", descr, class_="ColumnValue",
inline=True))
# return information to its callers
return residence
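
    # Note on the len(table) == 3 test above: a libhtml Html element is a
    # list of [start tag, contents..., end tag], so a length of 3 appears
    # to mean dump_place added exactly one child (its tbody, at table[-2]),
    # and the description row is placed inside that tbody rather than
    # after it.
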
def display_bkref(self, bkref_list, depth):
"""
Display a reference list for an object class
@param: bkref_list -- The reference list
@param: depth -- The style of list to use
"""
        list_style = "1", "a", "I", "A", "i"
        if depth >= len(list_style):
            return ""
        ordered = Html("ol", class_="Col1", role="Volume-n-Page")
        ordered.attr += ' type="%s"' % list_style[depth]
# Sort by the name of the object at the bkref_class, bkref_handle
# bug 8950 : it seems it's better to sort on name + gid.
def sort_by_name_and_gid(obj):
"""
Sort by name then gramps ID
"""
return (obj[1], obj[2])
for (bkref_class, bkref_handle) in sorted(
bkref_list, key=lambda x:
sort_by_name_and_gid(self.report.obj_dict[x[0]][x[1]])):
list_html = Html("li")
path = self.report.obj_dict[bkref_class][bkref_handle][0]
name = self.report.obj_dict[bkref_class][bkref_handle][1]
gid = self.report.obj_dict[bkref_class][bkref_handle][2]
ordered += list_html
if path == "":
list_html += name
list_html += self.display_bkref(
self.report.bkref_dict[bkref_class][bkref_handle],
depth+1)
else:
url = self.report.build_url_fname(path, uplink=self.uplink)
if not self.noid and gid != "":
gid_html = Html("span", " [%s]" % gid,
class_="grampsid", inline=True)
else:
gid_html = ""
list_html += Html("a", href=url) + name + gid_html
return ordered
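
    # display_bkref numbers nested reference lists by cycling through HTML
    # list types; a sketch of the depth -> type mapping:
    #
    #     depth 0 -> type="1", depth 1 -> type="a", depth 2 -> type="I",
    #     depth 3 -> type="A", depth 4 -> type="i"; deeper levels return ""
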
def display_bkref_list(self, obj_class, obj_handle):
"""
Display a reference list for an object class
@param: obj_class -- The object class to use
@param: obj_handle -- The handle to use
"""
bkref_list = self.report.bkref_dict[obj_class][obj_handle]
if not bkref_list:
return None
# begin references division and title
with Html("div", class_="subsection", id="references") as section:
section += Html("h4", self._("References"), inline=True)
depth = 0
ordered = self.display_bkref(bkref_list, depth)
section += ordered
return section
# -------------------------------------------------------------------------
    # Web Page Formatter and writer
# -------------------------------------------------------------------------
def xhtml_writer(self, htmlinstance, output_file, sio, date):
"""
Will format, write, and close the file
@param: output_file -- Open file that is being written to
@param: htmlinstance -- Web page created with libhtml
src/plugins/lib/libhtml.py
"""
htmlinstance.write(partial(print, file=output_file))
# closes the file
self.report.close_file(output_file, sio, date)
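
    # xhtml_writer hands libhtml a line-oriented callback: partial() binds
    # the open file so Html.write() can emit each rendered line to it.
    # A hedged sketch of the surrounding pattern:
    #
    #     page, head, body = self.write_header(self._("Families"))
    #     output_file, sio = self.report.create_file("families")
    #     self.xhtml_writer(page, output_file, sio, ldatec)
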
#################################################
#
# create the page from SurnameListPage
#
#################################################
class SurnamePage(BasePage):
"""
This will create a list of individuals with the same surname
"""
def __init__(self, report, title, surname, ppl_handle_list):
"""
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: surname -- The surname to use
@param: ppl_handle_list -- The list of people for whom we need to create
a page.
"""
BasePage.__init__(self, report, title)
# module variables
showbirth = report.options['showbirth']
showdeath = report.options['showdeath']
showpartner = report.options['showpartner']
showparents = report.options['showparents']
if surname == '':
surname = self._("<absent>")
output_file, sio = self.report.create_file(name_to_md5(surname), "srn")
self.uplink = True
(surnamepage, head,
body) = self.write_header("%s - %s" % (self._("Surname"), surname))
ldatec = 0
# begin SurnameDetail division
with Html("div", class_="content", id="SurnameDetail") as surnamedetail:
body += surnamedetail
# section title
surnamedetail += Html("h3", html_escape(surname), inline=True)
# feature request 2356: avoid genitive form
msg = self._("This page contains an index of all the individuals "
"in the database with the surname of %s. "
"Selecting the person’s name "
"will take you to that person’s "
"individual page.") % html_escape(surname)
surnamedetail += Html("p", msg, id="description")
# begin surname table and thead
with Html("table", class_="infolist primobjlist surname") as table:
surnamedetail += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
# Name Column
trow += Html("th", self._("Given Name"), class_="ColumnName",
inline=True)
if showbirth:
trow += Html("th", self._("Birth"), class_="ColumnDate",
inline=True)
if showdeath:
trow += Html("th", self._("Death"), class_="ColumnDate",
inline=True)
if showpartner:
trow += Html("th", self._("Partner"),
class_="ColumnPartner",
inline=True)
if showparents:
trow += Html("th", self._("Parents"),
class_="ColumnParents",
inline=True)
# begin table body
tbody = Html("tbody")
table += tbody
for person_handle in sorted(ppl_handle_list,
key=self.sort_on_name_and_grampsid):
person = self.r_db.get_person_from_handle(person_handle)
if person.get_change_time() > ldatec:
ldatec = person.get_change_time()
trow = Html("tr")
tbody += trow
# firstname column
link = self.new_person_link(person_handle, uplink=True,
person=person,
name_style=_NAME_STYLE_FIRST)
trow += Html("td", link, class_="ColumnName")
# birth column
if showbirth:
tcell = Html("td", class_="ColumnBirth", inline=True)
trow += tcell
birth_date = _find_birth_date(self.r_db, person)
if birth_date is not None:
if birth_date.fallback:
tcell += Html('em',
self.rlocale.get_date(birth_date),
inline=True)
else:
tcell += self.rlocale.get_date(birth_date)
else:
tcell += " "
# death column
if showdeath:
tcell = Html("td", class_="ColumnDeath", inline=True)
trow += tcell
death_date = _find_death_date(self.r_db, person)
if death_date is not None:
if death_date.fallback:
tcell += Html('em',
self.rlocale.get_date(death_date),
inline=True)
else:
tcell += self.rlocale.get_date(death_date)
else:
tcell += " "
# partner column
if showpartner:
tcell = Html("td", class_="ColumnPartner")
trow += tcell
family_list = person.get_family_handle_list()
first_family = True
if family_list:
fam_count = 0
for family_handle in family_list:
fam_count += 1
family = self.r_db.get_family_from_handle(
family_handle)
partner_handle = utils.find_spouse(
person, family)
if partner_handle:
link = self.new_person_link(partner_handle,
uplink=True)
if fam_count < len(family_list):
if isinstance(link, Html):
link.inside += ","
else:
link += ','
tcell += link
first_family = False
else:
tcell += " "
# parents column
if showparents:
parent_hdl_list = person.get_parent_family_handle_list()
if parent_hdl_list:
parent_hdl = parent_hdl_list[0]
fam = self.r_db.get_family_from_handle(parent_hdl)
f_id = fam.get_father_handle()
m_id = fam.get_mother_handle()
mother = father = None
if f_id:
father = self.r_db.get_person_from_handle(f_id)
if father:
father_name = self.get_name(father)
if m_id:
mother = self.r_db.get_person_from_handle(m_id)
if mother:
mother_name = self.get_name(mother)
if mother and father:
tcell = Html("span", father_name,
class_="father fatherNmother")
tcell += Html("span", mother_name,
class_="mother")
elif mother:
tcell = Html("span", mother_name,
class_="mother", inline=True)
elif father:
tcell = Html("span", father_name,
class_="father", inline=True)
samerow = False
else:
tcell = " "
samerow = True
trow += Html("td", tcell,
class_="ColumnParents", inline=samerow)
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(surnamepage, output_file, sio, ldatec)
#################################################
#
# creates the Family List Page and Family Pages
#
#################################################
class FamilyPages(BasePage):
"""
This class is responsible for displaying information about the 'Family'
database objects. It displays this information under the 'Families'
tab. It is told by the 'add_instances' call which 'Family's to display,
and remembers the list of Family. A single call to 'display_pages'
displays both the Family List (Index) page and all the Family
pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for
this report
"""
BasePage.__init__(self, report, title="")
self.family_dict = defaultdict(set)
self.person = None
self.familymappages = None
def display_pages(self, title):
"""
Generate and output the pages under the Family tab, namely the family
index and the individual family pages.
@param: title -- Is the title of the web page
"""
LOG.debug("obj_dict[Family]")
for item in self.report.obj_dict[Family].items():
LOG.debug(" %s", str(item))
with self.r_user.progress(_("Narrated Web Site Report"),
_("Creating family pages..."),
len(self.report.obj_dict[Family]) + 1
) as step:
self.familylistpage(self.report, title,
self.report.obj_dict[Family].keys())
for family_handle in self.report.obj_dict[Family]:
step()
self.familypage(self.report, title, family_handle)
def familylistpage(self, report, title, fam_list):
"""
Create a family index
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
        @param: fam_list -- The list of family handles to display
"""
BasePage.__init__(self, report, title)
output_file, sio = self.report.create_file("families")
familieslistpage, head, body = self.write_header(self._("Families"))
ldatec = 0
prev_letter = " "
# begin Family Division
with Html("div", class_="content", id="Relationships") as relationlist:
body += relationlist
# Families list page message
msg = self._("This page contains an index of all the "
"families/ relationships in the "
"database, sorted by their family name/ surname. "
"Clicking on a person’s "
"name will take you to their "
"family/ relationship’s page.")
relationlist += Html("p", msg, id="description")
# go through all the families, and construct a dictionary of all the
# people and the families they are involved in. Note that the people
# in the list may be involved in OTHER families, that are not listed
# because they are not in the original family list.
pers_fam_dict = defaultdict(list)
for family_handle in fam_list:
family = self.r_db.get_family_from_handle(family_handle)
if family:
if family.get_change_time() > ldatec:
ldatec = family.get_change_time()
husband_handle = family.get_father_handle()
spouse_handle = family.get_mother_handle()
if husband_handle:
pers_fam_dict[husband_handle].append(family)
if spouse_handle:
pers_fam_dict[spouse_handle].append(family)
# add alphabet navigation
index_list = get_first_letters(self.r_db, pers_fam_dict.keys(),
_KEYPERSON, rlocale=self.rlocale)
alpha_nav = alphabet_navigation(index_list, self.rlocale)
if alpha_nav:
relationlist += alpha_nav
# begin families table and table head
with Html("table", class_="infolist relationships") as table:
relationlist += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
# set up page columns
trow.extend(
Html("th", trans, class_=colclass, inline=True)
for trans, colclass in [
(self._("Letter"), "ColumnRowLabel"),
(self._("Person"), "ColumnPartner"),
(self._("Family"), "ColumnPartner"),
(self._("Marriage"), "ColumnDate"),
(self._("Divorce"), "ColumnDate")
]
)
tbody = Html("tbody")
table += tbody
# begin displaying index list
ppl_handle_list = sort_people(self.r_db, pers_fam_dict.keys(),
self.rlocale)
first = True
for (surname, handle_list) in ppl_handle_list:
if surname and not surname.isspace():
letter = get_index_letter(first_letter(surname),
index_list,
self.rlocale)
else:
letter = ' '
# get person from sorted database list
for person_handle in sorted(
handle_list, key=self.sort_on_name_and_grampsid):
person = self.r_db.get_person_from_handle(person_handle)
if person:
family_list = person.get_family_handle_list()
first_family = True
for family_handle in family_list:
get_family = self.r_db.get_family_from_handle
family = get_family(family_handle)
trow = Html("tr")
tbody += trow
tcell = Html("td", class_="ColumnRowLabel")
trow += tcell
if first or primary_difference(letter,
prev_letter,
self.rlocale):
first = False
prev_letter = letter
trow.attr = 'class="BeginLetter"'
ttle = self._("Families beginning with "
"letter ")
tcell += Html("a", letter, name=letter,
title=ttle + letter,
inline=True)
else:
tcell += ' '
tcell = Html("td", class_="ColumnPartner")
trow += tcell
if first_family:
trow.attr = 'class ="BeginFamily"'
tcell += self.new_person_link(
person_handle, uplink=self.uplink)
first_family = False
else:
tcell += ' '
tcell = Html("td", class_="ColumnPartner")
trow += tcell
tcell += self.family_link(
family.get_handle(),
self.report.get_family_name(family),
family.get_gramps_id(), self.uplink)
# family events, such as marriage and divorce events
fam_evt_ref_list = family.get_event_ref_list()
tcell1 = Html("td", class_="ColumnDate",
inline=True)
tcell2 = Html("td", class_="ColumnDate",
inline=True)
trow += (tcell1, tcell2)
if fam_evt_ref_list:
fam_evt_srt_ref_list = sorted(
fam_evt_ref_list,
key=self.sort_on_grampsid)
for evt_ref in fam_evt_srt_ref_list:
evt = self.r_db.get_event_from_handle(
evt_ref.ref)
if evt:
evt_type = evt.get_type()
if evt_type in [EventType.MARRIAGE,
EventType.DIVORCE]:
if (evt_type ==
EventType.MARRIAGE):
tcell1 += self.rlocale.get_date(
evt.get_date_object())
else:
tcell1 += ' '
if (evt_type ==
EventType.DIVORCE):
tcell2 += self.rlocale.get_date(
evt.get_date_object())
else:
tcell2 += ' '
else:
tcell1 += ' '
tcell2 += ' '
first_family = False
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(familieslistpage, output_file, sio, ldatec)
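# For reference, a hedged sketch of how the Html helper (from
# gramps.plugins.lib.libhtml) composes pages throughout this file: '+='
# nests a child element, and the context-manager form keeps the element
# tree readable. The labels below are placeholders, not ones used by
# the report:
#
#     with Html("table", class_="infolist") as table:
#         thead = Html("thead")
#         table += thead
#         trow = Html("tr")
#         thead += trow
#         trow += Html("th", "Name", class_="ColumnName", inline=True)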
def familypage(self, report, title, family_handle):
"""
Create a family page
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: family_handle -- The handle for the family to add
"""
family = report.database.get_family_from_handle(family_handle)
if not family:
return
BasePage.__init__(self, report, title, family.get_gramps_id())
ldatec = family.get_change_time()
self.bibli = Bibliography()
self.uplink = True
family_name = self.report.get_family_name(family)
self.page_title = family_name
self.familymappages = report.options["familymappages"]
output_file, sio = self.report.create_file(family.get_handle(), "fam")
familydetailpage, head, body = self.write_header(family_name)
# begin FamilyDetail division
with Html("div", class_="content",
id="RelationshipDetail") as relationshipdetail:
body += relationshipdetail
# family media list for initial thumbnail
if self.create_media:
media_list = family.get_media_list()
# If Event pages are not being created, then we need to display
# the family event media here
if not self.inc_events:
for evt_ref in family.get_event_ref_list():
event = self.r_db.get_event_from_handle(evt_ref.ref)
media_list += event.get_media_list()
thumbnail = self.disp_first_img_as_thumbnail(media_list,
family)
if thumbnail:
relationshipdetail += thumbnail
self.person = None # no longer used
relationshipdetail += Html(
"h2", self.page_title, inline=True) + (
Html('sup') + (Html('small') +
self.get_citation_links(
family.get_citation_list())))
# display relationships
families = self.display_family_relationships(family, None)
if families is not None:
relationshipdetail += families
# display additional images as gallery
if self.create_media and media_list:
addgallery = self.disp_add_img_as_gallery(media_list, family)
if addgallery:
relationshipdetail += addgallery
# Narrative subsection
notelist = family.get_note_list()
if notelist:
relationshipdetail += self.display_note_list(notelist)
# display family LDS ordinance...
family_lds_ordinance_list = family.get_lds_ord_list()
if family_lds_ordinance_list:
relationshipdetail += self.display_lds_ordinance(family)
# get attribute list
attrlist = family.get_attribute_list()
if attrlist:
attrsection, attrtable = self.display_attribute_header()
self.display_attr_list(attrlist, attrtable)
relationshipdetail += attrsection
# source references
srcrefs = self.display_ind_sources(family)
if srcrefs:
relationshipdetail += srcrefs
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(familydetailpage, output_file, sio, ldatec)
######################################################
# #
# Place Pages #
# #
######################################################
class PlacePages(BasePage):
"""
This class is responsible for displaying information about the 'Place'
database objects. It displays this information under the 'Places'
tab. It is told by the 'add_instances' call which 'Place's to display,
and remembers the list of places. A single call to 'display_pages'
displays both the Place List (Index) page and all the Place
pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for
this report
"""
BasePage.__init__(self, report, title="")
self.place_dict = defaultdict(set)
self.placemappages = None
self.mapservice = None
self.person = None
self.familymappages = None
def display_pages(self, title):
"""
Generate and output the pages under the Place tab, namely the place
index and the individual place pages.
@param: title -- Is the title of the web page
"""
LOG.debug("obj_dict[Place]")
for item in self.report.obj_dict[Place].items():
LOG.debug(" %s", str(item))
with self.r_user.progress(_("Narrated Web Site Report"),
_("Creating place pages"),
len(self.report.obj_dict[Place]) + 1
) as step:
self.placelistpage(self.report, title,
self.report.obj_dict[Place].keys())
for place_handle in self.report.obj_dict[Place]:
step()
self.placepage(self.report, title, place_handle)
def placelistpage(self, report, title, place_handles):
"""
Create a place index
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: place_handles -- The list of place handles to display
"""
BasePage.__init__(self, report, title)
output_file, sio = self.report.create_file("places")
placelistpage, head, body = self.write_header(self._("Places"))
ldatec = 0
prev_letter = " "
# begin places division
with Html("div", class_="content", id="Places") as placelist:
body += placelist
# place list page message
msg = self._("This page contains an index of all the places in the "
"database, sorted by their title. "
"Clicking on a place’s "
"title will take you to that place’s page.")
placelist += Html("p", msg, id="description")
# begin alphabet navigation
index_list = get_first_letters(self.r_db, place_handles,
_KEYPLACE, rlocale=self.rlocale)
alpha_nav = alphabet_navigation(index_list, self.rlocale)
if alpha_nav is not None:
placelist += alpha_nav
# begin places table and table head
with Html("table",
class_="infolist primobjlist placelist") as table:
placelist += table
# begin table head
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [
[self._("Letter"), "ColumnLetter"],
[self._("Place Name | Name"), "ColumnName"],
[self._("State/ Province"), "ColumnState"],
[self._("Country"), "ColumnCountry"],
[self._("Latitude"), "ColumnLatitude"],
[self._("Longitude"), "ColumnLongitude"]
]
)
# bug 9495 : incomplete display of place hierarchy labels
def sort_by_place_name(obj):
""" sort by lower case place name. """
name = self.report.obj_dict[Place][obj][1]
return name.lower()
handle_list = sorted(place_handles, key=sort_by_place_name)
first = True
# begin table body
tbody = Html("tbody")
table += tbody
for place_handle in handle_list:
place = self.r_db.get_place_from_handle(place_handle)
if place:
if place.get_change_time() > ldatec:
ldatec = place.get_change_time()
place_title = self.report.obj_dict[Place][place_handle][1]
main_location = get_main_location(self.r_db, place)
if place_title and place_title != " ":
letter = get_index_letter(first_letter(place_title),
index_list,
self.rlocale)
else:
letter = ' '
trow = Html("tr")
tbody += trow
tcell = Html("td", class_="ColumnLetter", inline=True)
trow += tcell
if first or primary_difference(letter, prev_letter,
self.rlocale):
first = False
prev_letter = letter
trow.attr = 'class = "BeginLetter"'
ttle = self._("Places beginning with letter %s") % letter
tcell += Html("a", letter, name=letter, title=ttle)
else:
tcell += " "
trow += Html("td",
self.place_link(
place.get_handle(),
place_title, place.get_gramps_id()),
class_="ColumnName")
trow.extend(
Html("td", data or " ", class_=colclass,
inline=True)
for (colclass, data) in [
["ColumnState",
main_location.get(PlaceType.STATE, '')],
["ColumnCountry",
main_location.get(PlaceType.COUNTRY, '')]
]
)
tcell1 = Html("td", class_="ColumnLatitude",
inline=True)
tcell2 = Html("td", class_="ColumnLongitude",
inline=True)
trow += (tcell1, tcell2)
if place.lat and place.long:
latitude, longitude = conv_lat_lon(place.lat,
place.long,
"DEG")
tcell1 += latitude
tcell2 += longitude
else:
tcell1 += ' '
tcell2 += ' '
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(placelistpage, output_file, sio, ldatec)
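# A small sketch of the two coordinate conversions used in this file,
# assuming gramps.gen.utils.place.conv_lat_lon behaves as called above:
# "DEG" yields degree/minute/second strings for display in the place
# index, while "D.D8" yields decimal-degree strings suitable for the
# javascript map markers (coordinate values here are illustrative):
#
#     latitude, longitude = conv_lat_lon("51.5074", "-0.1278", "DEG")
#     lat_dd, lon_dd = conv_lat_lon("51.5074", "-0.1278", "D.D8")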
def placepage(self, report, title, place_handle):
"""
Create a place page
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: place_handle -- The handle for the place to add
"""
place = report.database.get_place_from_handle(place_handle)
if not place:
return None
BasePage.__init__(self, report, title, place.get_gramps_id())
self.bibli = Bibliography()
place_name = self.report.obj_dict[Place][place_handle][1]
ldatec = place.get_change_time()
output_file, sio = self.report.create_file(place_handle, "plc")
self.uplink = True
self.page_title = place_name
placepage, head, body = self.write_header(_("Places"))
self.placemappages = self.report.options['placemappages']
self.mapservice = self.report.options['mapservice']
self.googlemapkey = self.report.options['googlemapkey']
# begin PlaceDetail Division
with Html("div", class_="content", id="PlaceDetail") as placedetail:
body += placedetail
if self.create_media:
media_list = place.get_media_list()
thumbnail = self.disp_first_img_as_thumbnail(media_list,
place)
if thumbnail is not None:
placedetail += thumbnail
# add section title
placedetail += Html("h3",
html_escape(place_name),
inline=True)
# begin summaryarea division and places table
with Html("div", id='summaryarea') as summaryarea:
placedetail += summaryarea
with Html("table", class_="infolist place") as table:
summaryarea += table
# list the place fields
self.dump_place(place, table)
# place gallery
if self.create_media:
placegallery = self.disp_add_img_as_gallery(media_list, place)
if placegallery is not None:
placedetail += placegallery
# place notes
notelist = self.display_note_list(place.get_note_list())
if notelist is not None:
placedetail += notelist
# place urls
urllinks = self.display_url_list(place.get_url_list())
if urllinks is not None:
placedetail += urllinks
# add place map here
# Link to GRAMPS marker
fname = "/".join(['images', 'marker.png'])
marker_path = self.report.build_url_image("marker.png",
"images", self.uplink)
if self.placemappages:
if place and (place.lat and place.long):
latitude, longitude = conv_lat_lon(place.get_latitude(),
place.get_longitude(),
"D.D8")
placetitle = place_name
# add narrative-maps CSS...
fname = "/".join(["css", "narrative-maps.css"])
url = self.report.build_url_fname(fname, None, self.uplink)
head += Html("link", href=url, type="text/css",
media="screen", rel="stylesheet")
# add MapService specific javascript code
src_js = GOOGLE_MAPS + "api/js?sensor=false"
if self.mapservice == "Google":
if self.googlemapkey:
src_js += "&key=" + self.googlemapkey
head += Html("script", type="text/javascript",
src=src_js, inline=True)
else:
url = "http://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css"
head += Html("link", href=url, type="text/javascript",
rel="stylesheet")
src_js = "http://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"
head += Html("script", type="text/javascript",
src=src_js, inline=True)
src_js = "http://openlayers.org/en/v3.17.1/build/ol.js"
head += Html("script", type="text/javascript",
src=src_js, inline=True)
url = "http://openlayers.org/en/v3.17.1/css/ol.css"
head += Html("link", href=url, type="text/javascript",
rel="stylesheet")
src_js = "http://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"
head += Html("script", type="text/javascript",
src=src_js, inline=True)
# section title
placedetail += Html("h4", self._("Place Map"), inline=True)
# begin map_canvas division
with Html("div", id="map_canvas", inline=True) as canvas:
placedetail += canvas
# Begin inline javascript code. Because jsc is a docstring,
# it does NOT have to be properly indented
if self.mapservice == "Google":
with Html("script", type="text/javascript",
indent=False) as jsc:
head += jsc
# Google adds Latitude/ Longitude to its maps...
plce = placetitle.replace("'", "\\'")
jsc += MARKER_PATH % marker_path
jsc += MARKERS % ([[plce,
latitude,
longitude,
1]],
latitude, longitude,
10)
else:
# OpenStreetMap (OSM) adds Longitude/ Latitude
# to its maps, and needs a country code in
# lowercase letters...
with Html("script", type="text/javascript") as jsc:
canvas += jsc
param1 = xml_lang()[3:5].lower()
jsc += MARKER_PATH % marker_path
jsc += OSM_MARKERS % ([[float(longitude),
float(latitude),
placetitle]],
longitude, latitude, 10)
# add javascript function call to body element
body.attr += ' onload = "initialize();" '
# add div for popups.
with Html("div", id="popup", inline=True) as popup:
placedetail += popup
# source references
srcrefs = self.display_ind_sources(place)
if srcrefs is not None:
placedetail += srcrefs
# References list
ref_list = self.display_bkref_list(Place, place_handle)
if ref_list is not None:
placedetail += ref_list
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(placepage, output_file, sio, ldatec)
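# A hedged sketch of the map branch above: for Google, one script tag
# (plus an optional API key) is injected into <head> and the marker list
# is latitude-first; for OpenStreetMap the OpenLayers/jQuery/Bootstrap
# assets are linked and the marker list is longitude-first.
# MARKER_PATH/MARKERS/OSM_MARKERS are the javascript templates defined
# elsewhere in this module; the variable names here are illustrative:
#
#     if mapservice == "Google":
#         jsc += MARKERS % ([[title, lat, lon, 1]], lat, lon, 10)
#     else:
#         jsc += OSM_MARKERS % ([[float(lon), float(lat), title]],
#                               lon, lat, 10)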
#################################################
#
# creates the Event List Page and EventPages
#
#################################################
class EventPages(BasePage):
"""
This class is responsible for displaying information about the 'Event'
database objects. It displays this information under the 'Events'
tab. It is told by the 'add_instances' call which 'Event's to display,
and remembers the list of events. A single call to 'display_pages'
displays both the Event List (Index) page and all the Event
pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for
this report
"""
BasePage.__init__(self, report, title="")
self.event_handle_list = []
self.event_types = []
self.event_dict = defaultdict(set)
def display_pages(self, title):
"""
Generate and output the pages under the Event tab, namely the event
index and the individual event pages.
@param: title -- Is the title of the web page
"""
LOG.debug("obj_dict[Event]")
for item in self.report.obj_dict[Event].items():
LOG.debug(" %s", str(item))
event_handle_list = self.report.obj_dict[Event].keys()
event_types = []
for event_handle in event_handle_list:
event = self.r_db.get_event_from_handle(event_handle)
event_types.append(self._(event.get_type().xml_str()))
with self.r_user.progress(_("Narrated Web Site Report"),
_("Creating event pages"),
len(event_handle_list) + 1
) as step:
self.eventlistpage(self.report, title, event_types,
event_handle_list)
for event_handle in event_handle_list:
step()
self.eventpage(self.report, title, event_handle)
def eventlistpage(self, report, title, event_types, event_handle_list):
"""
Will create the event list page
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: event_types -- A list of the type in the events database
@param: event_handle_list -- A list of event handles
"""
BasePage.__init__(self, report, title)
ldatec = 0
prev_letter = " "
output_file, sio = self.report.create_file("events")
eventslistpage, head, body = self.write_header(self._("Events"))
# begin events list division
with Html("div", class_="content", id="EventList") as eventlist:
body += eventlist
msg = self._("This page contains an index of all the events in the "
"database, sorted by their type and date (if one is "
"present). Clicking on an event’s Gramps ID "
"will open a page for that event.")
eventlist += Html("p", msg, id="description")
# get alphabet navigation...
index_list = get_first_letters(self.r_db, event_types,
_ALPHAEVENT)
alpha_nav = alphabet_navigation(index_list, self.rlocale)
if alpha_nav:
eventlist += alpha_nav
# begin alphabet event table
with Html("table",
class_="infolist primobjlist alphaevent") as table:
eventlist += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [
(self._("Letter"), "ColumnRowLabel"),
(self._("Type"), "ColumnType"),
(self._("Date"), "ColumnDate"),
(self._("Gramps ID"), "ColumnGRAMPSID"),
(self._("Person"), "ColumnPerson")
]
)
tbody = Html("tbody")
table += tbody
# separate events by their type and then their event handles
for (evt_type,
data_list) in sort_event_types(self.r_db,
event_types,
event_handle_list,
self.rlocale):
first = True
_event_displayed = []
# sort datalist by date of event and by event handle...
data_list = sorted(data_list, key=itemgetter(0, 1))
first_event = True
for (sort_value, event_handle) in data_list:
event = self.r_db.get_event_from_handle(event_handle)
_type = event.get_type()
gid = event.get_gramps_id()
if event.get_change_time() > ldatec:
ldatec = event.get_change_time()
# check to see if we have already listed this gramps_id
if gid not in _event_displayed:
# family event
if int(_type) in _EVENTMAP:
handle_list = set(
self.r_db.find_backlink_handles(
event_handle,
include_classes=['Family', 'Person']))
else:
handle_list = set(
self.r_db.find_backlink_handles(
event_handle,
include_classes=['Person']))
if handle_list:
trow = Html("tr")
tbody += trow
# set up hyperlinked letter for
# alphabet_navigation
tcell = Html("td", class_="ColumnLetter",
inline=True)
trow += tcell
if evt_type and not evt_type.isspace():
letter = get_index_letter(
self._(str(evt_type)[0].capitalize()),
index_list, self.rlocale)
else:
letter = " "
if first or primary_difference(letter,
prev_letter,
self.rlocale):
first = False
prev_letter = letter
t_a = 'class = "BeginLetter BeginType"'
trow.attr = t_a
ttle = self._("Event types beginning "
"with letter %s") % letter
tcell += Html("a", letter, name=letter,
id_=letter, title=ttle,
inline=True)
else:
tcell += " "
# display Event type if first in the list
tcell = Html("td", class_="ColumnType",
title=self._(evt_type),
inline=True)
trow += tcell
if first_event:
tcell += self._(evt_type)
if trow.attr == "":
trow.attr = 'class = "BeginType"'
else:
tcell += " "
# event date
tcell = Html("td", class_="ColumnDate",
inline=True)
trow += tcell
date = Date.EMPTY
if event:
date = event.get_date_object()
if date and date is not Date.EMPTY:
tcell += self.rlocale.get_date(date)
else:
tcell += " "
# Gramps ID
trow += Html("td", class_="ColumnGRAMPSID") + (
self.event_grampsid_link(event_handle,
gid, None)
)
# Person(s) column
tcell = Html("td", class_="ColumnPerson")
trow += tcell
# classname can either be a person or a family
first_person = True
# get person(s) for ColumnPerson
sorted_list = sorted(handle_list)
self.complete_people(tcell, first_person,
sorted_list,
uplink=False)
_event_displayed.append(gid)
first_event = False
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(eventslistpage, output_file, sio, ldatec)
def _geteventdate(self, event_handle):
"""
Get the event date
@param: event_handle -- The handle for the event to use
"""
event_date = Date.EMPTY
event = self.r_db.get_event_from_handle(event_handle)
if event:
date = event.get_date_object()
if date:
# returns the date in YYYY-MM-DD format
return Date(date.get_year_calendar("Gregorian"),
date.get_month(), date.get_day())
# return empty date string
return event_date
def event_grampsid_link(self, handle, grampsid, uplink):
"""
Create a hyperlink from event handle, but show grampsid
@param: handle -- The handle for the event
@param: grampsid -- The gramps ID to display
@param: uplink -- If True, then "../../../" is inserted in front of
the result.
"""
url = self.report.build_url_fname_html(handle, "evt", uplink)
# return hyperlink to its caller
return Html("a", grampsid, href=url, title=grampsid, inline=True)
def eventpage(self, report, title, event_handle):
"""
Creates the individual event page
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: event_handle -- The event handle for the database
"""
event = report.database.get_event_from_handle(event_handle)
if not event:
return None
BasePage.__init__(self, report, title, event.get_gramps_id())
ldatec = event.get_change_time()
event_media_list = event.get_media_list()
self.uplink = True
subdirs = True
evt_type = self._(event.get_type().xml_str())
self.page_title = evt_type
self.bibli = Bibliography()
output_file, sio = self.report.create_file(event_handle, "evt")
eventpage, head, body = self.write_header(self._("Events"))
# start event detail division
with Html("div", class_="content", id="EventDetail") as eventdetail:
body += eventdetail
thumbnail = self.disp_first_img_as_thumbnail(event_media_list,
event)
if thumbnail is not None:
eventdetail += thumbnail
# display page title
eventdetail += Html("h3", self.page_title, inline=True)
# begin eventdetail table
with Html("table", class_="infolist eventlist") as table:
eventdetail += table
tbody = Html("tbody")
table += tbody
evt_gid = event.get_gramps_id()
if not self.noid and evt_gid:
trow = Html("tr") + (
Html("td", self._("Gramps ID"),
class_="ColumnAttribute", inline=True),
Html("td", evt_gid,
class_="ColumnGRAMPSID", inline=True)
)
tbody += trow
# get event data
#
# for more information: see get_event_data()
#
event_data = self.get_event_data(event, event_handle,
subdirs, evt_gid)
for (label, colclass, data) in event_data:
if data:
trow = Html("tr") + (
Html("td", label, class_="ColumnAttribute",
inline=True),
Html('td', data, class_="Column" + colclass)
)
tbody += trow
# Narrative subsection
notelist = event.get_note_list()
notelist = self.display_note_list(notelist)
if notelist is not None:
eventdetail += notelist
# get attribute list
attrlist = event.get_attribute_list()
if attrlist:
attrsection, attrtable = self.display_attribute_header()
self.display_attr_list(attrlist, attrtable)
eventdetail += attrsection
# event source references
srcrefs = self.display_ind_sources(event)
if srcrefs is not None:
eventdetail += srcrefs
# display additional images as gallery
if self.create_media:
addgallery = self.disp_add_img_as_gallery(event_media_list,
event)
if addgallery:
eventdetail += addgallery
# References list
ref_list = self.display_bkref_list(Event, event_handle)
if ref_list is not None:
eventdetail += ref_list
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the page
self.xhtml_writer(eventpage, output_file, sio, ldatec)
#################################################
#
# Creates the Surname List page
#
#################################################
class SurnameListPage(BasePage):
"""
This class is responsible for displaying the list of Surnames
"""
ORDER_BY_NAME = 0
ORDER_BY_COUNT = 1
def __init__(self, report, title, ppl_handle_list,
order_by=ORDER_BY_NAME, filename="surnames"):
"""
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: ppl_handle_list -- The list of people for whom we need to create
a page.
@param: order_by -- The way to sort surnames :
Surnames or Surnames count
@param: filename -- The name to use for the Surnames page
"""
BasePage.__init__(self, report, title)
prev_surname = ""
prev_letter = " "
if order_by == self.ORDER_BY_NAME:
output_file, sio = self.report.create_file(filename)
surnamelistpage, head, body = self.write_header(self._('Surnames'))
else:
output_file, sio = self.report.create_file("surnames_count")
(surnamelistpage, head,
body) = self.write_header(self._('Surnames by person count'))
# begin surnames division
with Html("div", class_="content", id="surnames") as surnamelist:
body += surnamelist
# page message
msg = self._('This page contains an index of all the '
'surnames in the database. Selecting a link '
'will lead to a list of individuals in the '
'database with this same surname.')
surnamelist += Html("p", msg, id="description")
# add alphabet navigation...
# only if surname list not surname count
if order_by == self.ORDER_BY_NAME:
index_list = get_first_letters(self.r_db, ppl_handle_list,
_KEYPERSON, rlocale=self.rlocale)
alpha_nav = alphabet_navigation(index_list, self.rlocale)
if alpha_nav is not None:
surnamelist += alpha_nav
if order_by == self.ORDER_BY_COUNT:
table_id = 'SortByCount'
else:
table_id = 'SortByName'
# begin surnamelist table and table head
with Html("table", class_="infolist primobjlist surnamelist",
id=table_id) as table:
surnamelist += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow += Html("th", self._("Letter"), class_="ColumnLetter",
inline=True)
# create table header surname hyperlink
fname = self.report.surname_fname + self.ext
tcell = Html("th", class_="ColumnSurname", inline=True)
trow += tcell
hyper = Html("a", self._("Surname"),
href=fname, title=self._("Surnames"))
tcell += hyper
# create table header number of people hyperlink
fname = "surnames_count" + self.ext
tcell = Html("th", class_="ColumnQuantity", inline=True)
trow += tcell
num_people = self._("Number of People")
hyper = Html("a", num_people, href=fname, title=num_people)
tcell += hyper
# begin table body
with Html("tbody") as tbody:
table += tbody
ppl_handle_list = sort_people(self.r_db, ppl_handle_list,
self.rlocale)
if order_by == self.ORDER_BY_COUNT:
temp_list = {}
for (surname, data_list) in ppl_handle_list:
index_val = "%90d_%s" % (999999999-len(data_list),
surname)
temp_list[index_val] = (surname, data_list)
ppl_handle_list = (temp_list[key]
for key in sorted(temp_list,
key=self.rlocale.sort_key))
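# Why the odd "%90d_%s" key above: string sort is ascending, so the
# person count is subtracted from a large constant to make bigger
# families collate first, while the surname breaks ties alphabetically.
# A worked example (counts are illustrative):
#
#     "Smith" with 12 people -> "...999999987_Smith"
#     "Jones" with  3 people -> "...999999996_Jones"
#
# sorted() therefore yields Smith (12) before Jones (3).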
first = True
first_surname = True
for (surname, data_list) in ppl_handle_list:
if surname and not surname.isspace():
letter = first_letter(surname)
if order_by == self.ORDER_BY_NAME:
# There will only be an alphabetic index list if
# the ORDER_BY_NAME page is being generated
letter = get_index_letter(letter, index_list,
self.rlocale)
else:
letter = ' '
surname = self._("<absent>")
trow = Html("tr")
tbody += trow
tcell = Html("td", class_="ColumnLetter", inline=True)
trow += tcell
if first or primary_difference(letter, prev_letter,
self.rlocale):
first = False
prev_letter = letter
trow.attr = 'class = "BeginLetter"'
ttle = self._("Surnames beginning with "
"letter %s") % letter
hyper = Html("a", letter, name=letter,
title=ttle, inline=True)
tcell += hyper
elif first_surname or surname != prev_surname:
first_surname = False
tcell += " "
prev_surname = surname
trow += Html("td",
self.surname_link(name_to_md5(surname),
#html_escape(surname)),
surname),
class_="ColumnSurname", inline=True)
trow += Html("td", len(data_list),
class_="ColumnQuantity", inline=True)
# create footer section
# add clearline for proper styling
footer = self.write_footer(None)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(surnamelistpage,
output_file, sio, 0) # 0 => current date modification
def surname_link(self, fname, name, opt_val=None, uplink=False):
"""
Create a link to the surname page.
@param: fname -- Path to the file name
@param: name -- Name to see in the link
@param: opt_val -- Option value to use
@param: uplink -- If True, then "../../../" is inserted in front of
the result.
"""
url = self.report.build_url_fname_html(fname, "srn", uplink)
hyper = Html("a", html_escape(name), href=url, title=name, inline=True)
if opt_val is not None:
hyper += opt_val
# return hyperlink to its caller
return hyper
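# Usage sketch: surname pages are stored under an md5 of the surname
# (see the name_to_md5 call at the surname_link call site above), which
# sidesteps case, spacing and non-ASCII filename problems. An
# illustrative call:
#
#     hyper = self.surname_link(name_to_md5("O'Brien"), "O'Brien")
#     # -> an <a> pointing at the md5-named surname page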
class IntroductionPage(BasePage):
"""
This class is responsible for displaying information
about the introduction page.
"""
def __init__(self, report, title):
"""
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
"""
BasePage.__init__(self, report, title)
ldatec = 0
output_file, sio = self.report.create_file(report.intro_fname)
intropage, head, body = self.write_header(self._('Introduction'))
# begin Introduction division
with Html("div", class_="content", id="Introduction") as section:
body += section
introimg = self.add_image('introimg')
if introimg is not None:
section += introimg
note_id = report.options['intronote']
if note_id:
note = self.r_db.get_note_from_gramps_id(note_id)
note_text = self.get_note_format(note, False)
# attach note
section += note_text
# last modification of this note
ldatec = note.get_change_time()
# add clearline for proper styling
# create footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(intropage, output_file, sio, ldatec)
class HomePage(BasePage):
"""
This class is responsible for displaying information about the Home page.
"""
def __init__(self, report, title):
"""
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
"""
BasePage.__init__(self, report, title)
ldatec = 0
output_file, sio = self.report.create_file("index")
homepage, head, body = self.write_header(self._('Home'))
# begin home division
with Html("div", class_="content", id="Home") as section:
body += section
homeimg = self.add_image('homeimg')
if homeimg is not None:
section += homeimg
note_id = report.options['homenote']
if note_id:
note = self.r_db.get_note_from_gramps_id(note_id)
note_text = self.get_note_format(note, False)
# attach note
section += note_text
# last modification of this note
ldatec = note.get_change_time()
# create clear line for proper styling
# create footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(homepage, output_file, sio, ldatec)
#################################################
#
# Passes citations through to the Sources page
#
#################################################
class CitationPages(BasePage):
"""
This class is responsible for displaying information about the 'Citation'
database objects. It passes this information to the 'Sources' tab. It is
told by the 'add_instances' call which 'Citation's to display.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for
this report
"""
BasePage.__init__(self, report, title="")
def display_pages(self, title):
pass
#################################################
#
# creates the Source List Page and Source Pages
#
#################################################
class SourcePages(BasePage):
"""
This class is responsible for displaying information about the 'Source'
database objects. It displays this information under the 'Sources'
tab. It is told by the 'add_instances' call which 'Source's to display,
and remembers the list of sources. A single call to 'display_pages'
displays both the Source List (Index) page and all the Source
pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for
this report
"""
BasePage.__init__(self, report, title="")
self.source_dict = defaultdict(set)
self.navigation = None
self.citationreferents = None
def display_pages(self, title):
"""
Generate and output the pages under the Sources tab, namely the sources
index and the individual sources pages.
@param: title -- Is the title of the web page
"""
LOG.debug("obj_dict[Source]")
for item in self.report.obj_dict[Source].items():
LOG.debug(" %s", str(item))
with self.r_user.progress(_("Narrated Web Site Report"),
_("Creating source pages"),
len(self.report.obj_dict[Source]) + 1
) as step:
self.sourcelistpage(self.report, title,
self.report.obj_dict[Source].keys())
for source_handle in self.report.obj_dict[Source]:
step()
self.sourcepage(self.report, title, source_handle)
def sourcelistpage(self, report, title, source_handles):
"""
Generate and output the Sources index page.
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: source_handles -- A list of the handles of the sources to be
displayed
"""
BasePage.__init__(self, report, title)
source_dict = {}
output_file, sio = self.report.create_file("sources")
sourcelistpage, head, body = self.write_header(self._("Sources"))
# begin source list division
with Html("div", class_="content", id="Sources") as sourceslist:
body += sourceslist
# Sort the sources
for handle in source_handles:
source = self.r_db.get_source_from_handle(handle)
if source is not None:
key = source.get_title() + source.get_author()
key += str(source.get_gramps_id())
source_dict[key] = (source, handle)
keys = sorted(source_dict, key=self.rlocale.sort_key)
msg = self._("This page contains an index of all the sources "
"in the database, sorted by their title. "
"Clicking on a source’s "
"title will take you to that source’s page.")
sourceslist += Html("p", msg, id="description")
# begin sourcelist table and table head
with Html("table",
class_="infolist primobjlist sourcelist") as table:
sourceslist += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
header_row = [
(self._("Number"), "ColumnRowLabel"),
(self._("Author"), "ColumnAuthor"),
(self._("Source Name|Name"), "ColumnName")]
trow.extend(
Html("th", label or " ", class_=colclass, inline=True)
for (label, colclass) in header_row
)
# begin table body
tbody = Html("tbody")
table += tbody
for index, key in enumerate(keys):
source, source_handle = source_dict[key]
trow = Html("tr") + (
Html("td", index + 1, class_="ColumnRowLabel",
inline=True)
)
tbody += trow
trow.extend(
Html("td", source.get_author(), class_="ColumnAuthor",
inline=True)
)
trow.extend(
Html("td", self.source_link(source_handle,
source.get_title(),
source.get_gramps_id()),
class_="ColumnName")
)
# add clearline for proper styling
# add footer section
footer = self.write_footer(None)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(sourcelistpage, output_file, sio, 0)
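# The index above sorts with a composite string key; a condensed sketch
# of the same idea in isolation:
#
#     key = source.get_title() + source.get_author()
#     key += str(source.get_gramps_id())   # makes the key unique
#     keys = sorted(source_dict, key=self.rlocale.sort_key)
#
# Appending the gramps ID guarantees two sources with the same title
# and author still get distinct dictionary keys, so neither is lost.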
def sourcepage(self, report, title, source_handle):
"""
Generate and output an individual Source page.
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
@param: source_handle -- The handle of the source to be output
"""
source = report.database.get_source_from_handle(source_handle)
if not source:
return
BasePage.__init__(self, report, title, source.get_gramps_id())
self.page_title = source.get_title()
inc_repositories = self.report.options["inc_repository"]
self.navigation = self.report.options['navigation']
self.citationreferents = self.report.options['citationreferents']
output_file, sio = self.report.create_file(source_handle, "src")
self.uplink = True
sourcepage, head, body = self.write_header(
"%s - %s" % (self._('Sources'), self.page_title))
ldatec = 0
# begin source detail division
with Html("div", class_="content", id="SourceDetail") as sourcedetail:
body += sourcedetail
media_list = source.get_media_list()
if self.create_media and media_list:
thumbnail = self.disp_first_img_as_thumbnail(media_list,
source)
if thumbnail is not None:
sourcedetail += thumbnail
# add section title
sourcedetail += Html("h3", html_escape(source.get_title()),
inline=True)
# begin sources table
with Html("table", class_="infolist source") as table:
sourcedetail += table
tbody = Html("tbody")
table += tbody
source_gid = False
if not self.noid and self.gid:
source_gid = source.get_gramps_id()
# last modification of this source
ldatec = source.get_change_time()
for (label, value) in [
(self._("Gramps ID"), source_gid),
(self._("Author"), source.get_author()),
(self._("Abbreviation"), source.get_abbreviation()),
(self._("Publication information"),
source.get_publication_info())]:
if value:
trow = Html("tr") + (
Html("td", label, class_="ColumnAttribute",
inline=True),
Html("td", value, class_="ColumnValue", inline=True)
)
tbody += trow
# Source notes
notelist = self.display_note_list(source.get_note_list())
if notelist is not None:
sourcedetail += notelist
# additional media from Source (if any?)
if self.create_media and media_list:
sourcemedia = self.disp_add_img_as_gallery(media_list, source)
if sourcemedia is not None:
sourcedetail += sourcemedia
# Source Data Map...
src_data_map = self.write_srcattr(source.get_attribute_list())
if src_data_map is not None:
sourcedetail += src_data_map
# Source Repository list
if inc_repositories:
repo_list = self.dump_repository_ref_list(
source.get_reporef_list())
if repo_list is not None:
sourcedetail += repo_list
# Source references list
ref_list = self.display_bkref_list(Source, source_handle)
if ref_list is not None:
sourcedetail += ref_list
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(sourcepage, output_file, sio, ldatec)
#################################################
#
# creates the Media List Page and Media Pages
#
#################################################
class MediaPages(BasePage):
"""
This class is responsible for displaying information about the 'Media'
database objects. It displays this information under the 'Media'
tab. It is told by the 'add_instances' call which 'Media's to display,
and remembers the list of media objects. A single call to
'display_pages' displays both the Media List (Index) page and all the
Media pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for this report
"""
BasePage.__init__(self, report, title="")
self.media_dict = defaultdict(set)
def display_pages(self, title):
"""
Generate and output the pages under the Media tab, namely the media
index and the individual media pages.
@param: title -- Is the title of the web page
"""
LOG.debug("obj_dict[Media]")
for item in self.report.obj_dict[Media].items():
LOG.debug(" %s", str(item))
with self.r_user.progress(_("Narrated Web Site Report"),
_("Creating media pages"),
len(self.report.obj_dict[Media]) + 1
) as step:
# bug 8950 : it seems it's better to sort on desc + gid.
def sort_by_desc_and_gid(obj):
"""
Sort by media description and gramps ID
"""
return (obj.desc.lower(), obj.gramps_id)
sorted_media_handles = sorted(
self.report.obj_dict[Media].keys(),
key=lambda x: sort_by_desc_and_gid(
self.r_db.get_media_from_handle(x)))
self.medialistpage(self.report, title, sorted_media_handles)
prev = None
total = len(sorted_media_handles)
index = 1
for handle in sorted_media_handles:
gc.collect() # Reduce memory usage when there are many images.
next_ = None if index == total else sorted_media_handles[index]
step()
self.mediapage(self.report, title,
handle, (prev, next_, index, total))
prev = handle
index += 1
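# A minimal sketch of the prev/next chaining used above (the list and
# names are illustrative): each detail page receives its neighbours up
# front, so the gallery navigation can be rendered without a second
# pass over the handles.
#
#     prev, index, total = None, 1, len(handles)
#     for handle in handles:
#         next_ = None if index == total else handles[index]
#         make_page(handle, (prev, next_, index, total))
#         prev, index = handle, index + 1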
def medialistpage(self, report, title, sorted_media_handles):
"""
Generate and output the Media index page.
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
@param: sorted_media_handles -- A list of the handles of the media to be
displayed sorted by the media title
"""
BasePage.__init__(self, report, title)
output_file, sio = self.report.create_file("media")
medialistpage, head, body = self.write_header(self._('Media'))
ldatec = 0
# begin gallery division
with Html("div", class_="content", id="Gallery") as medialist:
body += medialist
msg = self._("This page contains an index of all the media objects "
"in the database, sorted by their title. Clicking on "
"the title will take you to that "
"media object’s page. "
"If you see media size dimensions "
"above an image, click on the "
"image to see the full sized version. ")
medialist += Html("p", msg, id="description")
# begin gallery table and table head
with Html("table",
class_="infolist primobjlist gallerylist") as table:
medialist += table
# begin table head
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow.extend(
Html("th", trans, class_=colclass, inline=True)
for trans, colclass in [
(" ", "ColumnRowLabel"),
(self._("Media | Name"), "ColumnName"),
(self._("Date"), "ColumnDate"),
(self._("Mime Type"), "ColumnMime")
]
)
# begin table body
tbody = Html("tbody")
table += tbody
index = 1
for media_handle in sorted_media_handles:
media = self.r_db.get_media_from_handle(media_handle)
if media:
if media.get_change_time() > ldatec:
ldatec = media.get_change_time()
title = media.get_description() or "[untitled]"
trow = Html("tr")
tbody += trow
media_data_row = [
[index, "ColumnRowLabel"],
[self.media_ref_link(media_handle,
title), "ColumnName"],
[self.rlocale.get_date(media.get_date_object()),
"ColumnDate"],
[media.get_mime_type(), "ColumnMime"]]
trow.extend(
Html("td", data, class_=colclass)
for data, colclass in media_data_row
)
index += 1
def sort_by_desc_and_gid(obj):
"""
Sort by media description and gramps ID
"""
return (obj.desc, obj.gramps_id)
unused_media_handles = []
if self.create_unused_media:
# add unused media
media_list = self.r_db.get_media_handles()
for media_ref in media_list:
if isinstance(media_ref, bytes):
media_handle = media_ref.decode("utf-8")
else:
media_handle = media_ref
if media_handle not in self.report.obj_dict[Media]:
unused_media_handles.append(media_handle)
unused_media_handles = sorted(
unused_media_handles,
key=lambda x: sort_by_desc_and_gid(
self.r_db.get_media_from_handle(x)))
idx = 1
prev = None
total = len(unused_media_handles)
if total > 0:
trow += Html("tr")
trow.extend(
Html("td", Html("h4", " "), inline=True) +
Html("td",
Html("h4",
self._("Below unused media objects"),
inline=True),
class_="") +
Html("td", Html("h4", " "), inline=True) +
Html("td", Html("h4", " "), inline=True)
)
for media_handle in unused_media_handles:
media = self.r_db.get_media_from_handle(media_handle)
gc.collect() # Reduce memory usage when many images.
next_ = None if idx == total else unused_media_handles[idx]
trow += Html("tr")
media_data_row = [
[index, "ColumnRowLabel"],
[self.media_ref_link(media_handle,
media.get_description()),
"ColumnName"],
[self.rlocale.get_date(media.get_date_object()),
"ColumnDate"],
[media.get_mime_type(), "ColumnMime"]]
trow.extend(
Html("td", data, class_=colclass)
for data, colclass in media_data_row
)
self.mediapage(self.report, title,
media_handle, (prev, next_, index, total))
prev = media_handle
index += 1
idx += 1
# add footer section
# add clearline for proper styling
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(medialistpage, output_file, sio, ldatec)
def media_ref_link(self, handle, name, uplink=False):
"""
Create a reference link to a media
@param: handle -- The media handle
@param: name -- The name to use for the link
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
"""
# get media url
url = self.report.build_url_fname_html(handle, "img", uplink)
# get name
name = html_escape(name)
# begin hyper link
hyper = Html("a", name, href=url, title=name)
# return hyperlink to its callers
return hyper
def mediapage(self, report, title, media_handle, info):
"""
Generate and output an individual Media page.
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
@param: media_handle -- The media handle to use
@param: info -- A tuple containing the media handle for the
next and previous media, the current page
number, and the total number of media pages
"""
media = report.database.get_media_from_handle(media_handle)
BasePage.__init__(self, report, title, media.gramps_id)
(prev, next_, page_number, total_pages) = info
ldatec = media.get_change_time()
# get media rectangles
_region_items = self.media_ref_rect_regions(media_handle)
output_file, sio = self.report.create_file(media_handle, "img")
self.uplink = True
self.bibli = Bibliography()
# get media type to be used primarily with "img" tags
mime_type = media.get_mime_type()
#mtype = get_description(mime_type)
if mime_type:
#note_only = False
newpath = self.copy_source_file(media_handle, media)
target_exists = newpath is not None
else:
#note_only = True
target_exists = False
self.copy_thumbnail(media_handle, media)
self.page_title = media.get_description()
esc_page_title = html_escape(self.page_title)
(mediapage, head,
body) = self.write_header("%s - %s" % (self._("Media"),
self.page_title))
# if there are media rectangle regions, attach behaviour style sheet
if _region_items:
fname = "/".join(["css", "behaviour.css"])
url = self.report.build_url_fname(fname, None, self.uplink)
head += Html("link", href=url, type="text/css",
media="screen", rel="stylesheet")
# begin MediaDetail division
with Html("div", class_="content", id="GalleryDetail") as mediadetail:
body += mediadetail
# media navigation
with Html("div", id="GalleryNav", role="navigation") as medianav:
mediadetail += medianav
if prev:
medianav += self.media_nav_link(prev,
self._("Previous"), True)
data = self._('%(strong1_start)s%(page_number)d%(strong_end)s '
'of %(strong2_start)s%(total_pages)d%(strong_end)s'
) % {'strong1_start' :
'<strong id="GalleryCurrent">',
'strong2_start' :
'<strong id="GalleryTotal">',
'strong_end' : '</strong>',
'page_number' : page_number,
'total_pages' : total_pages}
medianav += Html("span", data, id="GalleryPages")
if next_:
medianav += self.media_nav_link(next_, self._("Next"), True)
# missing media error message
errormsg = self._("The file has been moved or deleted.")
# begin summaryarea division
with Html("div", id="summaryarea") as summaryarea:
mediadetail += summaryarea
if mime_type:
if mime_type.startswith("image"):
if not target_exists:
with Html("div", id="MediaDisplay") as mediadisplay:
summaryarea += mediadisplay
mediadisplay += Html("span", errormsg,
class_="MissingImage")
else:
# Check how big the image is relative to the
# requested 'initial' image size.
# If it's significantly bigger, scale it down to
# improve the site's responsiveness. We don't want
# the user to have to await a large download
# unnecessarily. Either way, set the display image
# size as requested.
orig_image_path = media_path_full(self.r_db,
media.get_path())
#mtime = os.stat(orig_image_path).st_mtime
(width, height) = image_size(orig_image_path)
max_width = self.report.options[
'maxinitialimagewidth']
max_height = self.report.options[
'maxinitialimageheight']
if width != 0 and height != 0:
scale_w = (float(max_width)/width) or 1
# the 'or 1' is so that a max of
# zero is ignored
scale_h = (float(max_height)/height) or 1
else:
scale_w = 1.0
scale_h = 1.0
scale = min(scale_w, scale_h, 1.0)
new_width = int(width*scale)
new_height = int(height*scale)
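# Worked example of the scaling above (numbers are illustrative): a
# 4000x3000 photo with a requested maximum of 800x600 gives
# scale_w = 0.2 and scale_h = 0.2, so scale = min(0.2, 0.2, 1.0) = 0.2
# and the page shows an 800x600 image; a photo already smaller than
# both maxima gets scale = 1.0 and is left untouched.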
# TODO. Convert disk path to URL.
url = self.report.build_url_fname(orig_image_path,
None, self.uplink)
with Html("div", id="GalleryDisplay",
style='width: %dpx; height: %dpx' % (
new_width,
new_height)) as mediadisplay:
summaryarea += mediadisplay
# Feature #2634; display the mouse-selectable
# regions. See the large block at the top of
# this function where the various regions are
# stored in _region_items
if _region_items:
ordered = Html("ol", class_="RegionBox")
mediadisplay += ordered
while len(_region_items) > 0:
(name, coord_x, coord_y,
width, height, linkurl
) = _region_items.pop()
ordered += Html(
"li",
style="left:%d%%; "
"top:%d%%; "
"width:%d%%; "
"height:%d%%;" % (
coord_x, coord_y,
width, height)) + (
Html("a", name,
href=linkurl)
)
# display the image
if orig_image_path != newpath:
url = self.report.build_url_fname(
newpath, None, self.uplink)
mediadisplay += Html("a", href=url) + (
Html("img", width=new_width,
height=new_height, src=url,
alt=esc_page_title)
)
else:
dirname = tempfile.mkdtemp()
thmb_path = os.path.join(dirname, "document.png")
if run_thumbnailer(mime_type,
media_path_full(self.r_db,
media.get_path()),
thmb_path, 320):
try:
path = self.report.build_path(
"preview", media.get_handle())
npath = os.path.join(path, media.get_handle())
npath += ".png"
self.report.copy_file(thmb_path, npath)
path = npath
os.unlink(thmb_path)
except EnvironmentError:
path = os.path.join("images", "document.png")
else:
path = os.path.join("images", "document.png")
os.rmdir(dirname)
with Html("div", id="GalleryDisplay") as mediadisplay:
summaryarea += mediadisplay
img_url = self.report.build_url_fname(path,
None,
self.uplink)
if target_exists:
# TODO. Convert disk path to URL
url = self.report.build_url_fname(newpath,
None,
self.uplink)
hyper = Html("a", href=url,
title=esc_page_title) + (
Html("img", src=img_url,
alt=esc_page_title)
)
mediadisplay += hyper
else:
mediadisplay += Html("span", errormsg,
class_="MissingImage")
else:
with Html("div", id="GalleryDisplay") as mediadisplay:
summaryarea += mediadisplay
url = self.report.build_url_image("document.png",
"images", self.uplink)
mediadisplay += Html("img", src=url,
alt=esc_page_title,
title=esc_page_title)
# media title
title = Html("h3", html_escape(self.page_title.strip()),
inline=True)
summaryarea += title
# begin media table
with Html("table", class_="infolist gallery") as table:
summaryarea += table
# Gramps ID
media_gid = media.gramps_id
if not self.noid and media_gid:
trow = Html("tr") + (
Html("td", self._("Gramps ID"),
class_="ColumnAttribute",
inline=True),
Html("td", media_gid, class_="ColumnValue",
inline=True)
)
table += trow
# mime type
if mime_type:
trow = Html("tr") + (
Html("td", self._("File Type"),
class_="ColumnAttribute",
inline=True),
Html("td", mime_type, class_="ColumnValue",
inline=True)
)
table += trow
# media date
date = media.get_date_object()
if date and date is not Date.EMPTY:
trow = Html("tr") + (
Html("td", self._("Date"), class_="ColumnAttribute",
inline=True),
Html("td", self.rlocale.get_date(date),
class_="ColumnValue",
inline=True)
)
table += trow
# get media notes
notelist = self.display_note_list(media.get_note_list())
if notelist is not None:
mediadetail += notelist
# get attribute list
attrlist = media.get_attribute_list()
if attrlist:
attrsection, attrtable = self.display_attribute_header()
self.display_attr_list(attrlist, attrtable)
mediadetail += attrsection
# get media sources
srclist = self.display_media_sources(media)
if srclist is not None:
mediadetail += srclist
# get media references
reflist = self.display_bkref_list(Media, media_handle)
if reflist is not None:
mediadetail += reflist
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(mediapage, output_file, sio, ldatec)
def media_nav_link(self, handle, name, uplink=False):
"""
Creates the Media Page Navigation hyperlinks for Next and Prev
"""
url = self.report.build_url_fname_html(handle, "img", uplink)
name = html_escape(name)
return Html("a", name, name=name, id=name, href=url,
title=name, inline=True)
def display_media_sources(self, photo):
"""
Display media sources
@param: photo -- The source object (image, pdf, ...)
"""
for citation_handle in photo.get_citation_list():
self.bibli.add_reference(
self.r_db.get_citation_from_handle(citation_handle))
sourcerefs = self.display_source_refs(self.bibli)
# return source references to its caller
return sourcerefs
def copy_source_file(self, handle, photo):
"""
Copy source file in the web tree.
@param: handle -- Handle of the source
@param: photo -- The source object (image, pdf, ...)
"""
ext = os.path.splitext(photo.get_path())[1]
to_dir = self.report.build_path('images', handle)
newpath = os.path.join(to_dir, handle) + ext
fullpath = media_path_full(self.r_db, photo.get_path())
if not os.path.isfile(fullpath):
_WRONGMEDIAPATH.append([photo.get_gramps_id(), fullpath])
return None
try:
mtime = os.stat(fullpath).st_mtime
if self.report.archive:
self.report.archive.add(fullpath, str(newpath))
else:
to_dir = os.path.join(self.html_dir, to_dir)
if not os.path.isdir(to_dir):
os.makedirs(to_dir)
new_file = os.path.join(self.html_dir, newpath)
shutil.copyfile(fullpath, new_file)
os.utime(new_file, (mtime, mtime))
return newpath
except (IOError, OSError) as msg:
error = _("Missing media object:"
) + "%s (%s)" % (photo.get_description(),
photo.get_gramps_id())
self.r_user.warn(error, str(msg))
return None
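# A condensed sketch of the copy-with-timestamp idiom used above in the
# non-archive branch (src/dst paths are placeholders):
#
#     import os, shutil
#     mtime = os.stat(src).st_mtime
#     shutil.copyfile(src, dst)
#     os.utime(dst, (mtime, mtime))  # keep the original modification
#                                    # time so caches stay valid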
class ThumbnailPreviewPage(BasePage):
"""
This class is responsible for displaying information about
the Thumbnails page.
"""
def __init__(self, report, title, cb_progress):
"""
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
@param: cb_progress -- The step used for the progress bar.
"""
BasePage.__init__(self, report, title)
self.create_thumbs_only = report.options['create_thumbs_only']
        # bug 8950: it seems better to sort on desc + gid.
def sort_by_desc_and_gid(obj):
"""
Sort by media description and gramps ID
"""
return (obj.desc, obj.gramps_id)
self.photo_keys = sorted(self.report.obj_dict[Media],
key=lambda x: sort_by_desc_and_gid(
self.r_db.get_media_from_handle(x)))
if self.create_unused_media:
# add unused media
media_list = self.r_db.get_media_handles()
unused_media_handles = []
for media_ref in media_list:
if isinstance(media_ref, bytes):
media_handle = media_ref.decode("utf-8")
else:
media_handle = media_ref
if media_handle not in self.report.obj_dict[Media]:
self.photo_keys.append(media_handle)
media_list = []
for person_handle in self.photo_keys:
photo = self.r_db.get_media_from_handle(person_handle)
if photo:
if photo.get_mime_type().startswith("image"):
media_list.append((photo.get_description(), person_handle,
photo))
if self.create_thumbs_only:
self.copy_thumbnail(person_handle, photo)
media_list.sort(key=lambda x: self.rlocale.sort_key(x[0]))
# Create thumbnail preview page...
output_file, sio = self.report.create_file("thumbnails")
thumbnailpage, head, body = self.write_header(self._("Thumbnails"))
with Html("div", class_="content", id="Preview") as previewpage:
body += previewpage
msg = self._("This page displays a indexed list "
"of all the media objects "
"in this database. It is sorted by media title. "
"There is an index "
"of all the media objects in this database. "
"Clicking on a thumbnail "
"will take you to that image’s page.")
previewpage += Html("p", msg, id="description")
with Html("table", class_="calendar") as table:
previewpage += table
thead = Html("thead")
table += thead
# page title...
trow = Html("tr")
thead += trow
trow += Html("th", self._("Thumbnail Preview"),
class_="monthName", colspan=7, inline=True)
# table header cells...
trow = Html("tr")
thead += trow
ltrs = [" ", " ", " ",
" ", " ", " ", " "]
for ltr in ltrs:
trow += Html("th", ltr, class_="weekend", inline=True)
tbody = Html("tbody")
table += tbody
index, indexpos = 1, 0
num_of_images = len(media_list)
num_of_rows = ((num_of_images // 7) + 1)
num_of_cols = 7
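                # Note: when num_of_images is an exact multiple of 7, the
                # "(n // 7) + 1" formula above yields one extra, empty grid
                # row; ceiling division would avoid that, e.g. (a sketch):
                #     num_of_rows = -(-num_of_images // num_of_cols)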
grid_row = 0
while grid_row < num_of_rows:
trow = Html("tr", id="RowNumber: %08d" % grid_row)
tbody += trow
cols = 0
while cols < num_of_cols and indexpos < num_of_images:
ptitle = media_list[indexpos][0]
person_handle = media_list[indexpos][1]
photo = media_list[indexpos][2]
# begin table cell and attach to table row(trow)...
tcell = Html("td", class_="highlight weekend")
trow += tcell
# attach index number...
numberdiv = Html("div", class_="date")
tcell += numberdiv
# attach anchor name to date cell in upper right
# corner of grid...
numberdiv += Html("a", index, name=index, title=index,
inline=True)
# begin unordered list and
# attach to table cell(tcell)...
unordered = Html("ul")
tcell += unordered
# create thumbnail
(real_path,
newpath) = self.report.prepare_copy_media(photo)
newpath = self.report.build_url_fname(newpath)
list_html = Html("li")
unordered += list_html
# attach thumbnail to list...
list_html += self.thumb_hyper_image(newpath, "img",
person_handle,
ptitle)
index += 1
indexpos += 1
cols += 1
grid_row += 1
                    # if the last row is incomplete, pad it with empty cells
if grid_row == num_of_rows and cols < num_of_cols:
for emptycols in range(cols,
num_of_cols):
trow += Html("td", class_="emptyDays", inline=True)
# begin Thumbnail Reference section...
with Html("div", class_="subsection", id="references") as section:
body += section
section += Html("h4", self._("References"), inline=True)
with Html("table", class_="infolist") as table:
section += table
tbody = Html("tbody")
table += tbody
index = 1
for ptitle, person_handle, photo in media_list:
trow = Html("tr")
tbody += trow
tcell1 = Html("td",
self.thumbnail_link(ptitle, index),
class_="ColumnRowLabel")
tcell2 = Html("td", ptitle, class_="ColumnName")
trow += (tcell1, tcell2)
# increase index for row number...
index += 1
# increase progress meter...
cb_progress()
# add body id element
body.attr = 'id ="ThumbnailPreview"'
# add footer section
# add clearline for proper styling
footer = self.write_footer(None)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(thumbnailpage, output_file, sio, 0)
def thumbnail_link(self, name, index):
"""
creates a hyperlink for Thumbnail Preview Reference...
"""
return Html("a", index, title=html_escape(name), href="#%d" % index)
def thumb_hyper_image(self, thumbnail_url, subdir, fname, name):
"""
        Replaces media_link() because it doesn't work for this instance
"""
name = html_escape(name)
url = "/".join(self.report.build_subdirs(subdir,
fname) + [fname]) + self.ext
with Html("div", class_="content", id="ThumbnailPreview") as section:
with Html("div", class_="snapshot") as snapshot:
section += snapshot
with Html("div", class_="thumbnail") as thumbnail:
snapshot += thumbnail
if not self.create_thumbs_only:
thumbnail_link = Html("a", href=url, title=name) + (
Html("img", src=thumbnail_url, alt=name)
)
else:
thumbnail_link = Html("img", src=thumbnail_url,
alt=name)
thumbnail += thumbnail_link
return section
class DownloadPage(BasePage):
"""
This class is responsible for displaying information about the Download page
"""
def __init__(self, report, title):
"""
@param: report -- The instance of the main report class for this report
@param: title -- Is the title of the web page
"""
BasePage.__init__(self, report, title)
# do NOT include a Download Page
if not self.report.inc_download:
return
# menu options for class
# download and description #1
dlfname1 = self.report.dl_fname1
dldescr1 = self.report.dl_descr1
# download and description #2
dlfname2 = self.report.dl_fname2
dldescr2 = self.report.dl_descr2
        # if there are no filenames at all, do not create the page
if dlfname1 or dlfname2:
output_file, sio = self.report.create_file("download")
downloadpage, head, body = self.write_header(self._('Download'))
# begin download page and table
with Html("div", class_="content", id="Download") as download:
body += download
msg = self._("This page is for the user/ creator "
"of this Family Tree/ Narrative website "
"to share a couple of files with you "
"regarding their family. If there are "
"any files listed "
"below, clicking on them will allow you "
"to download them. The "
"download page and files have the same "
"copyright as the remainder "
"of these web pages.")
download += Html("p", msg, id="description")
# begin download table and table head
with Html("table", class_="infolist download") as table:
download += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_="Column" + colclass,
inline=True)
for (label, colclass) in [
(self._("File Name"), "Filename"),
(self._("Description"), "Description"),
(self._("Last Modified"), "Modified")]
)
# table body
tbody = Html("tbody")
table += tbody
                    # if dlfname1 is set, show it
if dlfname1:
trow = Html("tr", id='Row01')
tbody += trow
fname = os.path.basename(dlfname1)
# TODO dlfname1 is filename, convert disk path to URL
tcell = Html("td", class_="ColumnFilename") + (
Html("a", fname, href=dlfname1,
title=html_escape(dldescr1))
)
trow += tcell
dldescr1 = dldescr1 or " "
trow += Html("td", dldescr1,
class_="ColumnDescription", inline=True)
tcell = Html("td", class_="ColumnModified", inline=True)
trow += tcell
if os.path.exists(dlfname1):
modified = os.stat(dlfname1).st_mtime
last_mod = datetime.datetime.fromtimestamp(modified)
tcell += last_mod
else:
tcell += " "
                    # if download filename #2 is set, show it
if dlfname2:
# begin row #2
trow = Html("tr", id='Row02')
tbody += trow
fname = os.path.basename(dlfname2)
tcell = Html("td", class_="ColumnFilename") + (
Html("a", fname, href=dlfname2,
title=html_escape(dldescr2))
)
trow += tcell
dldescr2 = dldescr2 or " "
trow += Html("td", dldescr2,
class_="ColumnDescription", inline=True)
tcell = Html("td", id='Col04',
class_="ColumnModified", inline=True)
trow += tcell
if os.path.exists(dlfname2):
modified = os.stat(dlfname2).st_mtime
last_mod = datetime.datetime.fromtimestamp(modified)
tcell += last_mod
else:
tcell += " "
# clear line for proper styling
# create footer section
footer = self.write_footer(None)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(downloadpage, output_file, sio, 0)
class ContactPage(BasePage):
"""
This class is responsible for displaying information about the 'Researcher'
"""
def __init__(self, report, title):
"""
@param: report -- The instance of the main report class for this report
@param: title -- Is the title of the web page
"""
BasePage.__init__(self, report, title)
output_file, sio = self.report.create_file("contact")
contactpage, head, body = self.write_header(self._('Contact'))
# begin contact division
with Html("div", class_="content", id="Contact") as section:
body += section
# begin summaryarea division
with Html("div", id='summaryarea') as summaryarea:
section += summaryarea
contactimg = self.add_image('contactimg', 200)
if contactimg is not None:
summaryarea += contactimg
# get researcher information
res = get_researcher()
with Html("div", id='researcher') as researcher:
summaryarea += researcher
if res.name:
res.name = res.name.replace(',,,', '')
researcher += Html("h3", res.name, inline=True)
if res.addr:
researcher += Html("span", res.addr,
id='streetaddress', inline=True)
if res.locality:
researcher += Html("span", res.locality,
id="locality", inline=True)
text = "".join([res.city, res.state, res.postal])
if text:
city = Html("span", res.city, id='city', inline=True)
state = Html("span", res.state, id='state', inline=True)
postal = Html("span", res.postal, id='postalcode',
inline=True)
researcher += (city, state, postal)
if res.country:
researcher += Html("span", res.country,
id='country', inline=True)
if res.email:
researcher += Html("span", id='email') + (
Html("a", res.email,
href='mailto:%s' % res.email, inline=True)
)
# add clear line for proper styling
summaryarea += FULLCLEAR
note_id = report.options['contactnote']
if note_id:
note = self.r_db.get_note_from_gramps_id(note_id)
note_text = self.get_note_format(note, False)
# attach note
summaryarea += note_text
# add clearline for proper styling
# add footer section
footer = self.write_footer(None)
body += (FULLCLEAR, footer)
        # send page out for processing
# and close the file
self.xhtml_writer(contactpage, output_file, sio, 0)
#################################################
#
# creates the Individual List Page and IndividualPages
#
#################################################
class PersonPages(BasePage):
"""
This class is responsible for displaying information about the 'Person'
database objects. It displays this information under the 'Individuals'
tab. It is told by the 'add_instances' call which 'Person's to display,
and remembers the list of persons. A single call to 'display_pages'
displays both the Individual List (Index) page and all the Individual
pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for this report
"""
BasePage.__init__(self, report, title="")
self.ind_dict = defaultdict(set)
self.mapservice = None
self.sort_name = None
self.googleopts = None
self.googlemapkey = None
self.birthorder = None
self.person = None
self.familymappages = None
self.rel_class = None
self.placemappages = None
self.name = None
def display_pages(self, title):
"""
Generate and output the pages under the Individuals tab, namely the
individual index and the individual pages.
@param: title -- Is the title of the web page
"""
LOG.debug("obj_dict[Person]")
for item in self.report.obj_dict[Person].items():
LOG.debug(" %s", str(item))
with self.r_user.progress(_("Narrated Web Site Report"),
_('Creating individual pages'),
len(self.report.obj_dict[Person]) + 1
) as step:
self.individuallistpage(self.report, title,
self.report.obj_dict[Person].keys())
for person_handle in sorted(self.report.obj_dict[Person]):
step()
person = self.r_db.get_person_from_handle(person_handle)
self.individualpage(self.report, title, person)
#################################################
#
# creates the Individual List Page
#
#################################################
def individuallistpage(self, report, title, ppl_handle_list):
"""
        Creates the Individual List page
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
@param: ppl_handle_list -- The list of people for whom we need
to create a page.
"""
BasePage.__init__(self, report, title)
prev_letter = " "
# plugin variables for this module
showbirth = report.options['showbirth']
showdeath = report.options['showdeath']
showpartner = report.options['showpartner']
showparents = report.options['showparents']
output_file, sio = self.report.create_file("individuals")
indlistpage, head, body = self.write_header(self._("Individuals"))
date = 0
# begin Individuals division
with Html("div", class_="content", id="Individuals") as individuallist:
body += individuallist
# Individual List page message
msg = self._("This page contains an index of all the individuals "
"in the database, sorted by their last names. "
"Selecting the person’s "
"name will take you to that "
"person’s individual page.")
individuallist += Html("p", msg, id="description")
# add alphabet navigation
index_list = get_first_letters(self.r_db, ppl_handle_list,
_KEYPERSON, rlocale=self.rlocale)
alpha_nav = alphabet_navigation(index_list, self.rlocale)
if alpha_nav is not None:
individuallist += alpha_nav
# begin table and table head
with Html("table",
class_="infolist primobjlist IndividualList") as table:
individuallist += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
# show surname and first name
trow += Html("th", self._("Surname"), class_="ColumnSurname",
inline=True)
trow += Html("th", self._("Given Name"), class_="ColumnName",
inline=True)
if showbirth:
trow += Html("th", self._("Birth"), class_="ColumnDate",
inline=True)
if showdeath:
trow += Html("th", self._("Death"), class_="ColumnDate",
inline=True)
if showpartner:
trow += Html("th", self._("Partner"),
class_="ColumnPartner",
inline=True)
if showparents:
trow += Html("th", self._("Parents"),
class_="ColumnParents",
inline=True)
tbody = Html("tbody")
table += tbody
ppl_handle_list = sort_people(self.r_db, ppl_handle_list,
self.rlocale)
first = True
for (surname, handle_list) in ppl_handle_list:
if surname and not surname.isspace():
letter = get_index_letter(first_letter(surname), index_list,
self.rlocale)
else:
letter = ' '
surname = self._("<absent>")
first_surname = True
for person_handle in sorted(handle_list,
key=self.sort_on_name_and_grampsid):
person = self.r_db.get_person_from_handle(person_handle)
if person.get_change_time() > date:
date = person.get_change_time()
# surname column
trow = Html("tr")
tbody += trow
tcell = Html("td", class_="ColumnSurname", inline=True)
trow += tcell
if first or primary_difference(letter, prev_letter,
self.rlocale):
first = False
first_surname = False
prev_letter = letter
trow.attr = 'class = "BeginSurname"'
tcell += Html(
"a", html_escape(surname), name=letter,
id_=letter,
title=self._("Surnames %(surname)s beginning "
"with letter %(letter)s") %
{'surname' : surname,
'letter' : letter})
elif first_surname:
first_surname = False
tcell += Html("a", html_escape(surname),
title="Surnames " + surname)
else:
tcell += " "
# firstname column
link = self.new_person_link(person_handle, person=person,
name_style=_NAME_STYLE_FIRST)
trow += Html("td", link, class_="ColumnName")
# birth column
if showbirth:
tcell = Html("td", class_="ColumnBirth", inline=True)
trow += tcell
birth_date = _find_birth_date(self.r_db, person)
if birth_date is not None:
if birth_date.fallback:
tcell += Html('em',
self.rlocale.get_date(birth_date),
inline=True)
else:
tcell += self.rlocale.get_date(birth_date)
else:
tcell += " "
# death column
if showdeath:
tcell = Html("td", class_="ColumnDeath", inline=True)
trow += tcell
death_date = _find_death_date(self.r_db, person)
if death_date is not None:
if death_date.fallback:
tcell += Html('em',
self.rlocale.get_date(death_date),
inline=True)
else:
tcell += self.rlocale.get_date(death_date)
else:
tcell += " "
# partner column
if showpartner:
family_list = person.get_family_handle_list()
first_family = True
#partner_name = None
tcell = ()
if family_list:
for family_handle in family_list:
family = self.r_db.get_family_from_handle(
family_handle)
partner_handle = utils.find_spouse(
person, family)
if partner_handle:
if not first_family:
# have to do this to get the comma on
# the same line as the link
if isinstance(tcell[-1], Html):
# tcell is an instance of Html (or
# of a subclass thereof)
tcell[-1].inside += ","
else:
tcell = tcell[:-1] + (
(tcell[-1] + ", "),)
# Have to manipulate as tuples so that
# subsequent people are not nested
# within the first link
tcell += (
self.new_person_link(partner_handle),)
first_family = False
else:
tcell = " "
trow += Html("td", class_="ColumnPartner") + tcell
# parents column
if showparents:
parent_hdl_list = person.get_parent_family_handle_list()
if parent_hdl_list:
parent_handle = parent_hdl_list[0]
family = self.r_db.get_family_from_handle(
parent_handle)
father_handle = family.get_father_handle()
mother_handle = family.get_mother_handle()
if father_handle:
father = self.r_db.get_person_from_handle(
father_handle)
else:
father = None
if mother_handle:
mother = self.r_db.get_person_from_handle(
mother_handle)
else:
mother = None
if father:
father_name = self.get_name(father)
if mother:
mother_name = self.get_name(mother)
samerow = False
if mother and father:
tcell = (Html("span", father_name,
class_="father fatherNmother",
inline=True),
Html("span", mother_name,
class_="mother", inline=True))
elif mother:
tcell = Html("span", mother_name,
class_="mother", inline=True)
elif father:
tcell = Html("span", father_name,
class_="father", inline=True)
else:
tcell = " "
samerow = True
else:
tcell = " "
samerow = True
trow += Html("td", class_="ColumnParents",
inline=samerow) + tcell
# create clear line for proper styling
# create footer section
footer = self.write_footer(date)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(indlistpage, output_file, sio, date)
#################################################
#
# creates an Individual Page
#
#################################################
gender_map = {
Person.MALE : _('male'),
Person.FEMALE : _('female'),
Person.UNKNOWN : _('unknown'),
}
def individualpage(self, report, title, person):
"""
Creates an individual page
@param: report -- The instance of the main report class for this report
@param: title -- Is the title of the web page
@param: person -- The person to use for this page.
"""
BasePage.__init__(self, report, title, person.get_gramps_id())
place_lat_long = []
self.person = person
self.bibli = Bibliography()
self.sort_name = self.get_name(person)
self.name = self.get_name(person)
date = self.person.get_change_time()
# to be used in the Family Map Pages...
self.familymappages = self.report.options['familymappages']
self.placemappages = self.report.options['placemappages']
self.mapservice = self.report.options['mapservice']
self.googleopts = self.report.options['googleopts']
self.googlemapkey = self.report.options['googlemapkey']
# decide if we will sort the birth order of siblings...
self.birthorder = self.report.options['birthorder']
# get the Relationship Calculator so that we can determine
# bio, half, step- siblings for use in display_ind_parents() ...
self.rel_class = self.report.rel_class
output_file, sio = self.report.create_file(person.get_handle(), "ppl")
self.uplink = True
indivdetpage, head, body = self.write_header(self.sort_name)
# attach the ancestortree style sheet if ancestor
# graph is being created?
if self.report.options["ancestortree"]:
if self.usecms:
fname = "/".join([self.target_uri, "css", "ancestortree.css"])
else:
fname = "/".join(["css", "ancestortree.css"])
url = self.report.build_url_fname(fname, None, self.uplink)
head += Html("link", href=url, type="text/css", media="screen",
rel="stylesheet")
# begin individualdetail division
with Html("div", class_="content",
id='IndividualDetail') as individualdetail:
body += individualdetail
# display a person's general data
thumbnail, name, summary = self.display_ind_general()
if thumbnail is not None:
individualdetail += thumbnail
individualdetail += (name, summary)
# display a person's events
sect2 = self.display_ind_events(place_lat_long)
if sect2 is not None:
individualdetail += sect2
# display relationship to the center person
sect3 = self.display_ind_center_person()
if sect3 is not None:
individualdetail += sect3
# display parents
sect4 = self.display_ind_parents()
if sect4 is not None:
individualdetail += sect4
# display relationships
relationships = self.display_relationships(self.person,
place_lat_long)
if relationships is not None:
individualdetail += relationships
# display LDS ordinance
sect5 = self.display_lds_ordinance(self.person)
if sect5 is not None:
individualdetail += sect5
# display address(es) and show sources
sect6 = self.display_addr_list(self.person.get_address_list(), True)
if sect6 is not None:
individualdetail += sect6
photo_list = self.person.get_media_list()
media_list = photo_list[:]
# if Family Pages are not being created, then include the Family
# Media objects? There is no reason to add these objects to the
# Individual Pages...
if not self.inc_families:
for handle in self.person.get_family_handle_list():
family = self.r_db.get_family_from_handle(handle)
if family:
media_list += family.get_media_list()
for evt_ref in family.get_event_ref_list():
event = self.r_db.get_event_from_handle(evt_ref.ref)
media_list += event.get_media_list()
# if the Event Pages are not being created, then include the Event
# Media objects? There is no reason to add these objects to the
# Individual Pages...
if not self.inc_events:
for evt_ref in self.person.get_primary_event_ref_list():
event = self.r_db.get_event_from_handle(evt_ref.ref)
if event:
media_list += event.get_media_list()
# display additional images as gallery
sect7 = self.disp_add_img_as_gallery(media_list, person)
if sect7 is not None:
individualdetail += sect7
# display Narrative Notes
notelist = person.get_note_list()
sect8 = self.display_note_list(notelist)
if sect8 is not None:
individualdetail += sect8
# display attributes
attrlist = person.get_attribute_list()
if attrlist:
attrsection, attrtable = self.display_attribute_header()
self.display_attr_list(attrlist, attrtable)
individualdetail += attrsection
# display web links
sect10 = self.display_url_list(self.person.get_url_list())
if sect10 is not None:
individualdetail += sect10
# display associations
assocs = person.get_person_ref_list()
if assocs:
individualdetail += self.display_ind_associations(assocs)
# for use in family map pages...
if len(place_lat_long) > 0:
if self.report.options["familymappages"]:
# save output_file, string_io and cur_fname
# before creating a new page
sof = output_file
sstring_io = sio
sfname = self.report.cur_fname
individualdetail += self.__display_family_map(
person, place_lat_long)
# restore output_file, string_io and cur_fname
# after creating a new page
output_file = sof
sio = sstring_io
self.report.cur_fname = sfname
# display pedigree
sect13 = self.display_ind_pedigree()
if sect13 is not None:
individualdetail += sect13
# display ancestor tree
if report.options['ancestortree']:
sect14 = self.display_tree()
if sect14 is not None:
individualdetail += sect14
# display source references
sect14 = self.display_ind_sources(person)
if sect14 is not None:
individualdetail += sect14
# add clearline for proper styling
# create footer section
footer = self.write_footer(date)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(indivdetpage, output_file, sio, date)
def __create_family_map(self, person, place_lat_long):
"""
creates individual family map page
@param: person -- person from database
@param: place_lat_long -- for use in Family Map Pages
"""
if not place_lat_long:
return
output_file, sio = self.report.create_file(person.get_handle(), "maps")
self.uplink = True
familymappage, head, body = self.write_header(self._("Family Map"))
minx, maxx = Decimal("0.00000001"), Decimal("0.00000001")
miny, maxy = Decimal("0.00000001"), Decimal("0.00000001")
xwidth, yheight = [], []
midx_, midy_, spanx, spany = [None]*4
number_markers = len(place_lat_long)
if number_markers > 1:
for (latitude, longitude, placetitle, handle,
date, etype) in place_lat_long:
xwidth.append(latitude)
yheight.append(longitude)
xwidth.sort()
yheight.sort()
minx = xwidth[0] if xwidth[0] else minx
maxx = xwidth[-1] if xwidth[-1] else maxx
minx, maxx = Decimal(minx), Decimal(maxx)
midx_ = str(Decimal((minx + maxx) /2))
miny = yheight[0] if yheight[0] else miny
maxy = yheight[-1] if yheight[-1] else maxy
miny, maxy = Decimal(miny), Decimal(maxy)
midy_ = str(Decimal((miny + maxy) /2))
midx_, midy_ = conv_lat_lon(midx_, midy_, "D.D8")
# get the integer span of latitude and longitude
spanx = int(maxx - minx)
spany = int(maxy - miny)
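            # e.g. with latitudes 40.5 and 42.5 the midpoint is 41.5 and
            # spanx == int(42.5 - 40.5) == 2; conv_lat_lon(..., "D.D8")
            # normalises the midpoint to decimal degrees with 8 places.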
            # set zoom level based on the span of longitude
            tinyset = [-3, -2, -1, 0, 1, 2, 3]
            smallset = [-4, -5, -6, -7, 4, 5, 6, 7]
            middleset = [-8, -9, -10, -11, 8, 9, 10, 11]
            largeset = [-11, -12, -13, -14, -15, -16, -17,
                        11, 12, 13, 14, 15, 16, 17]
if spany in tinyset or spany in smallset:
zoomlevel = 6
elif spany in middleset:
zoomlevel = 5
elif spany in largeset:
zoomlevel = 4
else:
zoomlevel = 3
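            # The chain above is roughly equivalent to banding on the
            # absolute span (a sketch, assuming integer spany):
            #     span = abs(spany)
            #     if span <= 7:
            #         zoomlevel = 6
            #     elif span <= 11:
            #         zoomlevel = 5
            #     elif span <= 17:
            #         zoomlevel = 4
            #     else:
            #         zoomlevel = 3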
# 0 = latitude, 1 = longitude, 2 = place title,
# 3 = handle, and 4 = date, 5 = event type...
# being sorted by date, latitude, and longitude...
place_lat_long = sorted(place_lat_long, key=itemgetter(4, 0, 1))
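        # itemgetter(4, 0, 1) builds the sort key (date, latitude,
        # longitude) from each 6-tuple, so markers appear in chronological
        # order, with ties broken by position.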
# for all plugins
# if family_detail_page
# if active
# call_(report, up, head)
# add narrative-maps style sheet
if self.usecms:
fname = "/".join([self.target_uri, "css", "narrative-maps.css"])
else:
fname = "/".join(["css", "narrative-maps.css"])
url = self.report.build_url_fname(fname, None, self.uplink)
head += Html("link", href=url, type="text/css", media="screen",
rel="stylesheet")
# add MapService specific javascript code
if self.mapservice == "Google":
src_js = GOOGLE_MAPS + "api/js?sensor=false"
if self.googlemapkey:
src_js += "&key=" + self.googlemapkey
head += Html("script", type="text/javascript",
src=src_js, inline=True)
else:
url = "http://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css"
head += Html("link", href=url, type="text/javascript",
rel="stylesheet")
src_js = "http://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"
head += Html("script", type="text/javascript",
src=src_js, inline=True)
src_js = "http://openlayers.org/en/v3.17.1/build/ol.js"
head += Html("script", type="text/javascript",
src=src_js, inline=True)
url = "http://openlayers.org/en/v3.17.1/css/ol.css"
head += Html("link", href=url, type="text/javascript",
rel="stylesheet")
src_js = "http://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"
head += Html("script", type="text/javascript",
src=src_js, inline=True)
if number_markers > 0:
tracelife = "["
seq_ = 1
for index in range(0, (number_markers - 1)):
(latitude, longitude, placetitle, handle, date,
etype) = place_lat_long[index]
# are we using Google?
if self.mapservice == "Google":
# are we creating Family Links?
if self.googleopts == "FamilyLinks":
tracelife += """
new google.maps.LatLng(%s, %s),""" % (latitude, longitude)
# are we creating Drop Markers or Markers?
elif self.googleopts in ["Drop", "Markers"]:
tracelife += """
['%s', %s, %s, %d],""" % (placetitle.replace("'", "\\'"), latitude,
longitude, seq_)
# are we using OpenStreetMap?
else:
tracelife += """
[%f, %f, \'%s\'],""" % (float(longitude), float(latitude),
placetitle.replace("'", "\\'"))
seq_ += 1
# FIXME: The last element in the place_lat_long list is treated
# specially, and the code above is apparently repeated so as to
# avoid a comma at the end, and get the right closing. This is very
# ugly.
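            # One possible cleanup (a sketch, not the current behavior):
            # build the per-marker strings first and join them, so no
            # element needs special-casing, e.g. for the Google
            # "Drop"/"Markers" form:
            #     entries = ["['%s', %s, %s, %d]" %
            #                (ptitle.replace("'", "\\'"), lat, lon, num)
            #                for num, (lat, lon, ptitle, hdl, dat, etyp)
            #                in enumerate(place_lat_long, 1)]
            #     tracelife = "[" + ",".join(entries) + "];"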
(latitude, longitude, placetitle, handle, date,
etype) = place_lat_long[-1]
# are we using Google?
if self.mapservice == "Google":
# are we creating Family Links?
if self.googleopts == "FamilyLinks":
tracelife += """
new google.maps.LatLng(%s, %s)
];""" % (latitude, longitude)
# are we creating Drop Markers or Markers?
elif self.googleopts in ["Drop", "Markers"]:
tracelife += """
['%s', %s, %s, %d]
];""" % (placetitle.replace("'", "\\'"), latitude, longitude, seq_)
# are we using OpenStreetMap?
elif self.mapservice == "OpenStreetMap":
tracelife += """
[%f, %f, \'%s\']
];""" % (float(longitude), float(latitude), placetitle.replace("'", "\\'"))
# begin MapDetail division...
with Html("div", class_="content", id="FamilyMapDetail") as mapdetail:
body += mapdetail
# add page title
mapdetail += Html("h3",
html_escape(
self._("Tracking %s") % self.get_name(person)),
inline=True)
# page description
msg = self._("This map page represents that person "
"and any descendants with "
"all of their event/ places. If you place your mouse over "
"the marker it will display the place name. "
"The markers and the Reference "
"list are sorted in date order (if any?). "
"Clicking on a place’s "
"name in the Reference section will take you "
"to that place’s page.")
mapdetail += Html("p", msg, id="description")
# this is the style element where the Map is held in the CSS...
with Html("div", id="map_canvas") as canvas:
mapdetail += canvas
# begin javascript inline code...
with Html("script", deter="deter",
style='width =100%; height =100%;',
type="text/javascript", indent=False) as jsc:
head += jsc
# Link to GRAMPS marker
fname = "/".join(['images', 'marker.png'])
marker_path = self.report.build_url_image("marker.png",
"images",
self.uplink)
jsc += MARKER_PATH % marker_path
# are we using Google?
if self.mapservice == "Google":
# are we creating Family Links?
if self.googleopts == "FamilyLinks":
                            if midy_ is None:
jsc += FAMILYLINKS % (tracelife, latitude,
longitude, int(10))
else:
jsc += FAMILYLINKS % (tracelife, midx_, midy_,
zoomlevel)
# are we creating Drop Markers?
elif self.googleopts == "Drop":
                            if midy_ is None:
jsc += DROPMASTERS % (tracelife, latitude,
longitude, int(10))
else:
jsc += DROPMASTERS % (tracelife, midx_, midy_,
zoomlevel)
# we are creating Markers only...
else:
                            if midy_ is None:
jsc += MARKERS % (tracelife, latitude,
longitude, int(10))
else:
jsc += MARKERS % (tracelife, midx_, midy_,
zoomlevel)
# we are using OpenStreetMap...
else:
                        if midy_ is None:
jsc += OSM_MARKERS % (tracelife,
longitude,
latitude, 10)
else:
jsc += OSM_MARKERS % (tracelife, midy_, midx_,
zoomlevel)
            # if Google and Drop Markers are selected,
            # then add the "Drop Markers" button
if self.mapservice == "Google" and self.googleopts == "Drop":
mapdetail += Html("button", _("Drop Markers"),
id="drop", onclick="drop()", inline=True)
# add div for popups.
with Html("div", id="popup", inline=True) as popup:
mapdetail += popup
# begin place reference section and its table...
with Html("div", class_="subsection", id="references") as section:
mapdetail += section
section += Html("h4", self._("References"), inline=True)
with Html("table", class_="infolist") as table:
section += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [
(_("Date"), "ColumnDate"),
(_("Place Title"), "ColumnPlace"),
(_("Event Type"), "ColumnType")
]
)
tbody = Html("tbody")
table += tbody
for (latitude, longitude, placetitle, handle, date,
etype) in place_lat_long:
trow = Html("tr")
tbody += trow
trow.extend(
Html("td", data, class_=colclass, inline=True)
for data, colclass in [
(date, "ColumnDate"),
(self.place_link(handle, placetitle,
uplink=True),
"ColumnPlace"),
(str(etype), "ColumnType")
]
)
# add body id for this page...
body.attr = 'id ="FamilyMap" onload ="initialize()"'
# add clearline for proper styling
# add footer section
footer = self.write_footer(None)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(familymappage, output_file, sio, 0)
def __display_family_map(self, person, place_lat_long):
"""
Create the family map link
@param: person -- The person to set in the box
@param: place_lat_long -- The center of the box
"""
# create family map page
self.__create_family_map(person, place_lat_long)
# begin family map division plus section title
with Html("div", class_="subsection", id="familymap") as familymap:
familymap += Html("h4", self._("Family Map"), inline=True)
# add family map link
person_handle = person.get_handle()
url = self.report.build_url_fname_html(person_handle, "maps", True)
familymap += self.family_map_link(person_handle, url)
# return family map link to its caller
return familymap
def draw_box(self, center, col, person):
"""
Draw the box around the AncestorTree Individual name box...
@param: center -- The center of the box
@param: col -- The generation number
@param: person -- The person to set in the box
"""
top = center - _HEIGHT/2
xoff = _XOFFSET+col*(_WIDTH+_HGAP)
sex = person.gender
if sex == Person.MALE:
divclass = "male"
elif sex == Person.FEMALE:
divclass = "female"
else:
divclass = "unknown"
boxbg = Html("div", class_="boxbg %s AncCol%s" % (divclass, col),
style="top: %dpx; left: %dpx;" % (top, xoff+1)
)
person_name = self.get_name(person)
# This does not use [new_]person_link because the requirements are
# unique
result = self.report.obj_dict.get(Person).get(person.handle)
if result is None or result[0] == "":
# The person is not included in the webreport or there is no link
# to them
boxbg += Html("span", person_name, class_="unlinked", inline=True)
else:
thumbnail_url = None
if self.create_media and col < 5:
photolist = person.get_media_list()
if photolist:
photo_handle = photolist[0].get_reference_handle()
photo = self.r_db.get_media_from_handle(photo_handle)
mime_type = photo.get_mime_type()
if mime_type:
region = self.media_ref_region_to_object(photo_handle,
person)
if region:
# make a thumbnail of this region
newpath = self.copy_thumbnail(
photo_handle, photo, region)
# TODO. Check if build_url_fname can be used.
newpath = "/".join(['..']*3 + [newpath])
if win():
newpath = newpath.replace('\\', "/")
thumbnail_url = newpath
else:
(photo_url,
thumbnail_url) = self.report.prepare_copy_media(
photo)
thumbnail_url = "/".join(['..']*3 + [thumbnail_url])
if win():
thumbnail_url = thumbnail_url.replace('\\', "/")
url = self.report.build_url_fname_html(person.handle, "ppl", True)
if thumbnail_url is None:
boxbg += Html("a", href=url, class_="noThumb") + person_name
else:
thumb = Html("span", class_="thumbnail") + (
Html("img", src=thumbnail_url, alt="Image: " + person_name))
boxbg += Html("a", href=url) + thumb + person_name
shadow = Html(
"div", class_="shadow", inline=True,
style="top: %dpx; left: %dpx;" % (top + _SHADOW, xoff + _SHADOW))
return [boxbg, shadow]
def extend_line(self, coord_y0, coord_x0):
"""
        Draw an extended line
        @param: coord_y0 -- The vertical position of the line
        @param: coord_x0 -- The horizontal start of the line
"""
style = "top: %dpx; left: %dpx; width: %dpx"
ext_bv = Html("div", class_="bvline", inline=True,
style=style % (coord_y0, coord_x0, _HGAP/2)
)
ext_gv = Html("div", class_="gvline", inline=True,
style=style % (coord_y0+_SHADOW,
coord_x0, _HGAP/2+_SHADOW)
)
return [ext_bv, ext_gv]
def connect_line(self, coord_y0, coord_y1, col):
"""
        Draw a line between two points
@param: coord_y0 -- The starting point
@param: coord_y1 -- The end of the line
@param: col -- The generation number
"""
coord_y = min(coord_y0, coord_y1)
stylew = "top: %dpx; left: %dpx; width: %dpx;"
styleh = "top: %dpx; left: %dpx; height: %dpx;"
coord_x0 = _XOFFSET + col * _WIDTH + (col-1)*_HGAP + _HGAP/2
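        # e.g. for col == 1 the joint sits at _XOFFSET + _WIDTH + _HGAP/2,
        # i.e. halfway through the gap to the right of the first
        # generation's column of boxes.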
cnct_bv = Html("div", class_="bvline", inline=True,
style=stylew % (coord_y1, coord_x0, _HGAP/2))
cnct_gv = Html("div", class_="gvline", inline=True,
style=stylew % (coord_y1+_SHADOW,
coord_x0+_SHADOW,
_HGAP/2+_SHADOW))
cnct_bh = Html("div", class_="bhline", inline=True,
style=styleh % (coord_y, coord_x0,
abs(coord_y0-coord_y1)))
cnct_gh = Html("div", class_="gvline", inline=True,
style=styleh % (coord_y+_SHADOW,
coord_x0+_SHADOW,
abs(coord_y0-coord_y1)))
return [cnct_bv, cnct_gv, cnct_bh, cnct_gh]
def draw_connected_box(self, center1, center2, col, handle):
"""
Draws the connected box for Ancestor Tree on the Individual Page
@param: center1 -- The first box to connect
@param: center2 -- The destination box to draw
@param: col -- The generation number
@param: handle -- The handle of the person to set in the new box
"""
box = []
if not handle:
return box
person = self.r_db.get_person_from_handle(handle)
box = self.draw_box(center2, col, person)
box += self.connect_line(center1, center2, col)
return box
def display_tree(self):
"""
Display the Ancestor Tree
"""
tree = []
if not self.person.get_main_parents_family_handle():
return None
generations = self.report.options['graphgens']
max_in_col = 1 << (generations-1)
max_size = _HEIGHT*max_in_col + _VGAP*(max_in_col+1)
center = int(max_size/2)
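        # e.g. for generations == 4: max_in_col == 1 << 3 == 8 boxes in the
        # last column, so the canvas is sized for 8 boxes plus 9 vertical
        # gaps.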
with Html("div", id="tree", class_="subsection") as tree:
tree += Html("h4", self._('Ancestors'), inline=True)
with Html("div", id="treeContainer",
style="width:%dpx; height:%dpx;" % (
_XOFFSET+(generations)*_WIDTH+(generations-1)*_HGAP,
max_size)
) as container:
tree += container
container += self.draw_tree(1, generations, max_size,
0, center, self.person.handle)
return tree
def draw_tree(self, gen_nr, maxgen, max_size, old_center,
new_center, person_handle):
"""
Draws the Ancestor Tree
@param: gen_nr -- The generation number to draw
@param: maxgen -- The maximum number of generations to draw
@param: max_size -- The maximum size of the drawing area
@param: old_center -- The position of the old box
@param: new_center -- The position of the new box
@param: person_handle -- The handle of the person to draw
"""
tree = []
if gen_nr > maxgen:
return tree
gen_offset = int(max_size / pow(2, gen_nr+1))
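        # Each generation halves the vertical offset between a box and its
        # parents, e.g. with max_size == 800: gen 1 -> 200, gen 2 -> 100,
        # gen 3 -> 50.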
if person_handle:
person = self.r_db.get_person_from_handle(person_handle)
else:
person = None
if not person:
return tree
if gen_nr == 1:
tree = self.draw_box(new_center, 0, person)
else:
tree = self.draw_connected_box(old_center, new_center,
gen_nr-1, person_handle)
if gen_nr == maxgen:
return tree
family_handle = person.get_main_parents_family_handle()
if family_handle:
line_offset = _XOFFSET + gen_nr*_WIDTH + (gen_nr-1)*_HGAP
tree += self.extend_line(new_center, line_offset)
family = self.r_db.get_family_from_handle(family_handle)
f_center = new_center-gen_offset
f_handle = family.get_father_handle()
tree += self.draw_tree(gen_nr+1, maxgen, max_size,
new_center, f_center, f_handle)
m_center = new_center+gen_offset
m_handle = family.get_mother_handle()
tree += self.draw_tree(gen_nr+1, maxgen, max_size,
new_center, m_center, m_handle)
return tree
def display_ind_associations(self, assoclist):
"""
Display an individual's associations
@param: assoclist -- The list of persons for association
"""
# begin Associations division
with Html("div", class_="subsection", id="Associations") as section:
section += Html("h4", self._('Associations'), inline=True)
with Html("table", class_="infolist assoclist") as table:
section += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
assoc_row = [
(self._("Person"), 'Person'),
(self._('Relationship'), 'Relationship'),
(self._("Notes"), 'Notes'),
(self._("Sources"), 'Sources'),
]
trow.extend(
Html("th", label, class_="Column" + colclass, inline=True)
for (label, colclass) in assoc_row)
tbody = Html("tbody")
table += tbody
for person_ref in assoclist:
trow = Html("tr")
tbody += trow
person_lnk = self.new_person_link(person_ref.ref,
uplink=True)
index = 0
for data in [
person_lnk,
person_ref.get_relation(),
self.dump_notes(person_ref.get_note_list()),
self.get_citation_links(
person_ref.get_citation_list()),
]:
# get colclass from assoc_row
colclass = assoc_row[index][1]
trow += Html("td", data, class_="Column" + colclass,
inline=True)
index += 1
# return section to its callers
return section
def display_ind_pedigree(self):
"""
Display an individual's pedigree
"""
birthorder = self.report.options["birthorder"]
# Define helper functions
def children_ped(ol_html):
"""
Create a children list
@param: ol_html -- The html element to complete
"""
if family:
childlist = family.get_child_ref_list()
childlist = [child_ref.ref for child_ref in childlist]
children = add_birthdate(self.r_db, childlist)
if birthorder:
children = sorted(children)
for birthdate, handle in children:
if handle == self.person.get_handle():
child_ped(ol_html)
elif handle:
child = self.r_db.get_person_from_handle(handle)
if child:
ol_html += Html("li") + self.pedigree_person(child)
else:
child_ped(ol_html)
return ol_html
def child_ped(ol_html):
"""
Create a child element list
@param: ol_html -- The html element to complete
"""
with Html("li", self.name, class_="thisperson") as pedfam:
family = self.pedigree_family()
if family:
pedfam += Html("ol", class_="spouselist") + family
return ol_html + pedfam
# End of helper functions
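        # The helpers above nest ordered lists; the generated markup is
        # roughly (a sketch):
        #     <ol class="pedigreegen">
        #       <li> father
        #         <ol>
        #           <li class="spouse"> mother
        #             <ol> siblings, with this person as
        #                  <li class="thisperson"> ... </li>
        #             </ol>
        #           </li>
        #         </ol>
        #       </li>
        #     </ol>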
parent_handle_list = self.person.get_parent_family_handle_list()
if parent_handle_list:
parent_handle = parent_handle_list[0]
family = self.r_db.get_family_from_handle(parent_handle)
father_handle = family.get_father_handle()
mother_handle = family.get_mother_handle()
if mother_handle:
mother = self.r_db.get_person_from_handle(mother_handle)
else:
mother = None
if father_handle:
father = self.r_db.get_person_from_handle(father_handle)
else:
father = None
else:
family = None
father = None
mother = None
with Html("div", id="pedigree", class_="subsection") as ped:
ped += Html("h4", self._('Pedigree'), inline=True)
with Html("ol", class_="pedigreegen") as pedol:
ped += pedol
if father and mother:
pedfa = Html("li") + self.pedigree_person(father)
pedol += pedfa
with Html("ol") as pedma:
pedfa += pedma
pedma += (Html("li", class_="spouse") +
self.pedigree_person(mother) +
children_ped(Html("ol"))
)
elif father:
pedol += (Html("li") + self.pedigree_person(father) +
children_ped(Html("ol"))
)
elif mother:
pedol += (Html("li") + self.pedigree_person(mother) +
children_ped(Html("ol"))
)
else:
pedol += (Html("li") + children_ped(Html("ol")))
return ped
def display_ind_general(self):
"""
display an individual's general information...
"""
self.page_title = self.sort_name
thumbnail = self.disp_first_img_as_thumbnail(
self.person.get_media_list(), self.person)
section_title = Html("h3", html_escape(self.page_title),
inline=True) + (
Html('sup') + (
Html('small') +
self.get_citation_links(
self.person.get_citation_list())))
# begin summaryarea division
with Html("div", id='summaryarea') as summaryarea:
# begin general details table
with Html("table", class_="infolist") as table:
summaryarea += table
primary_name = self.person.get_primary_name()
all_names = [primary_name] + self.person.get_alternate_names()
# if the callname or the nickname is the same as the 'first
# name' (given name), then they are not displayed.
first_name = primary_name.get_first_name()
# Names [and their sources]
for name in all_names:
pname = html_escape(_nd.display_name(name))
pname += self.get_citation_links(name.get_citation_list())
                    # if we have just a firstname, then the name is preceded
                    # by ", " which doesn't exactly look very nice printed on
                    # the web page
if pname[:2] == ', ':
pname = pname[2:]
if name != primary_name:
datetext = self.rlocale.get_date(name.date)
if datetext:
pname = datetext + ': ' + pname
type_ = self._(name.get_type().xml_str())
trow = Html("tr") + (
Html("td", type_, class_="ColumnAttribute",
inline=True)
)
tcell = Html("td", pname, class_="ColumnValue")
# display any notes associated with this name
notelist = name.get_note_list()
if len(notelist):
unordered = Html("ul")
for notehandle in notelist:
note = self.r_db.get_note_from_handle(notehandle)
if note:
note_text = self.get_note_format(note, True)
# attach note
unordered += note_text
tcell += unordered
trow += tcell
table += trow
# display the callname associated with this name.
call_name = name.get_call_name()
if call_name and call_name != first_name:
trow = Html("tr") + (
Html("td", _("Call Name"), class_="ColumnAttribute",
inline=True),
Html("td", call_name, class_="ColumnValue",
inline=True)
)
table += trow
                    # display the nickname associated with this name. Note that
                    # this no longer displays the Nickname attribute (if
                    # present), because the nickname attribute is deprecated in
                    # favour of the nick_name property of the name structure
                    # (see http://gramps.1791082.n4.nabble.com/Where-is-
                    # nickname-stored-tp4469779p4484272.html), and also because
                    # the attribute is (normally) displayed lower down in the
                    # Narrative Web report.
nick_name = name.get_nick_name()
if nick_name and nick_name != first_name:
trow = Html("tr") + (
Html("td", self._("Nick Name"),
class_="ColumnAttribute",
inline=True),
Html("td", nick_name, class_="ColumnValue",
inline=True)
)
table += trow
# Gramps ID
person_gid = self.person.get_gramps_id()
if not self.noid and person_gid:
trow = Html("tr") + (
Html("td", self._("Gramps ID"),
class_="ColumnAttribute",
inline=True),
Html("td", person_gid, class_="ColumnValue",
inline=True)
)
table += trow
# Gender
gender = self._(self.gender_map[self.person.gender])
trow = Html("tr") + (
Html("td", self._("Gender"), class_="ColumnAttribute",
inline=True),
Html("td", gender, class_="ColumnValue", inline=True)
)
table += trow
# Age At Death???
birth_date = Date.EMPTY
birth_ref = self.person.get_birth_ref()
if birth_ref:
birth = self.r_db.get_event_from_handle(birth_ref.ref)
if birth:
birth_date = birth.get_date_object()
if birth_date and birth_date is not Date.EMPTY:
alive = probably_alive(self.person, self.r_db, Today())
death_date = _find_death_date(self.r_db, self.person)
if not alive and death_date is not None:
nyears = death_date - birth_date
nyears = nyears.format(precision=3,
dlocale=self.rlocale)
trow = Html("tr") + (
Html("td", self._("Age at Death"),
class_="ColumnAttribute", inline=True),
Html("td", nyears,
class_="ColumnValue", inline=True)
)
table += trow
# return all three pieces to its caller
# do NOT combine before returning
return thumbnail, section_title, summaryarea
def display_ind_events(self, place_lat_long):
"""
will create the events table
@param: place_lat_long -- For use in Family Map Pages. This will be None
if called from Family pages, which do not
create a Family Map
"""
event_ref_list = self.person.get_event_ref_list()
if not event_ref_list:
return None
# begin events division and section title
with Html("div", id="events", class_="subsection") as section:
section += Html("h4", self._("Events"), inline=True)
# begin events table
with Html("table", class_="infolist eventlist") as table:
section += table
thead = Html("thead")
table += thead
# attach event header row
thead += self.event_header_row()
tbody = Html("tbody")
table += tbody
for evt_ref in event_ref_list:
event = self.r_db.get_event_from_handle(evt_ref.ref)
if event:
# display event row
tbody += self.display_event_row(event, evt_ref,
place_lat_long,
True, True,
EventRoleType.PRIMARY)
return section
def display_parent(self, handle, title, rel):
"""
This will display a parent ...
@param: handle -- The person handle
@param: title -- Is the title of the web page
@param: rel -- The relation
"""
tcell1 = Html("td", title, class_="ColumnAttribute", inline=True)
tcell2 = Html("td", class_="ColumnValue")
tcell2 += self.new_person_link(handle, uplink=True)
if rel and rel != ChildRefType(ChildRefType.BIRTH):
tcell2 += ''.join([' '] *3 + ['(%s)']) % str(rel)
# return table columns to its caller
return tcell1, tcell2
def get_reln_in_family(self, ind, family):
"""
        Display the relation of the individual in the family
@param: ind -- The person to use
@param: family -- The family
"""
child_handle = ind.get_handle()
child_ref_list = family.get_child_ref_list()
for child_ref in child_ref_list:
if child_ref.ref == child_handle:
return (child_ref.get_father_relation(),
child_ref.get_mother_relation())
return (None, None)
def display_ind_parent_family(self, birthmother, birthfather, family,
table,
first=False):
"""
Display the individual parent family
@param: birthmother -- The birth mother
@param: birthfather -- The birth father
@param: family -- The family
@param: table -- The html document to complete
@param: first -- Is this the first indiv ?
"""
if not first:
trow = Html("tr") + (Html("td", " ", colspan=3,
inline=True))
table += trow
# get the father
father_handle = family.get_father_handle()
if father_handle:
if father_handle == birthfather:
                # The parent may not be the birth father in this family,
                # because it may be a step family. However, it would be odd
                # to display the parent as anything other than "Father"
reln = self._("Father")
else:
# Stepfather may not always be quite right (for example, it may
# actually be StepFather-in-law), but it is too expensive to
# calculate out the correct relationship using the Relationship
# Calculator
reln = self._("Stepfather")
trow = Html("tr") + (self.display_parent(father_handle, reln, None))
table += trow
# get the mother
mother_handle = family.get_mother_handle()
if mother_handle:
if mother_handle == birthmother:
reln = self._("Mother")
else:
reln = self._("Stepmother")
trow = Html("tr") + (self.display_parent(mother_handle, reln, None))
table += trow
for child_ref in family.get_child_ref_list():
child_handle = child_ref.ref
child = self.r_db.get_person_from_handle(child_handle)
if child:
if child == self.person:
reln = ""
else:
                    try:
                        # We have a try/except block here because the two
                        # people MUST be siblings for the called Relationship
                        # routines to work. Depending on your definition of
                        # sibling, we cannot necessarily guarantee that.
                        sibling_type = self.rel_class.get_sibling_type(
                            self.r_db, self.person, child)
                        reln = self.rel_class.get_sibling_relationship_string(
                            sibling_type, self.person.gender, child.gender)
                        # We have a problem here: reln is never in the chosen
                        # language but in the default language.
                        # Does get_sibling_relationship_string work?
                        reln = reln[0].upper() + reln[1:]
                    except Exception:
                        reln = self._("Not siblings")
reln = " " + reln
# Now output reln, child_link, (frel, mrel)
frel = child_ref.get_father_relation()
mrel = child_ref.get_mother_relation()
if frel != ChildRefType.BIRTH or mrel != ChildRefType.BIRTH:
frelmrel = "(%s, %s)" % (str(frel), str(mrel))
else:
frelmrel = ""
trow = Html("tr") + (
Html("td", reln, class_="ColumnAttribute", inline=True))
tcell = Html("td", class_="ColumnValue", inline=True)
tcell += " "
tcell += self.display_child_link(child_handle)
trow += tcell
tcell = Html("td", frelmrel, class_="ColumnValue",
inline=True)
trow += tcell
table += trow
def display_step_families(self, parent_handle,
family,
all_family_handles,
birthmother, birthfather,
table):
"""
Display step families
@param: parent_handle -- The family parent handle to display
@param: family -- The family
@param: all_family_handles -- All known family handles
@param: birthmother -- The birth mother
@param: birthfather -- The birth father
@param: table -- The html document to complete
"""
if parent_handle:
parent = self.r_db.get_person_from_handle(parent_handle)
for parent_family_handle in parent.get_family_handle_list():
if parent_family_handle not in all_family_handles:
parent_family = self.r_db.get_family_from_handle(
parent_family_handle)
self.display_ind_parent_family(birthmother, birthfather,
parent_family, table)
all_family_handles.append(parent_family_handle)
def display_ind_center_person(self):
"""
Display the person's relationship to the center person
"""
center_person = self.r_db.get_person_from_gramps_id(
self.report.options['pid'])
relationship = self.rel_class.get_one_relationship(self.r_db,
self.person,
center_person)
if relationship == "": # No relation to display
return
# begin center_person division
section = ""
with Html("div", class_="subsection", id="parents") as section:
message = self._("Relation to the center person")
message += " ("
name_format = self.report.options['name_format']
primary_name = center_person.get_primary_name()
name = Name(primary_name)
name.set_display_as(name_format)
message += _nd.display_name(name)
message += ") : "
message += relationship
section += Html("h4", message, inline=True)
return section
def display_ind_parents(self):
"""
Display a person's parents
"""
parent_list = self.person.get_parent_family_handle_list()
if not parent_list:
return None
# begin parents division
with Html("div", class_="subsection", id="parents") as section:
section += Html("h4", self._("Parents"), inline=True)
# begin parents table
with Html("table", class_="infolist") as table:
section += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [
(self._("Relation to main person"), "ColumnAttribute"),
(self._("Name"), "ColumnValue"),
(self._("Relation within this family "
"(if not by birth)"),
"ColumnValue")
]
)
tbody = Html("tbody")
table += tbody
all_family_handles = list(parent_list)
(birthmother, birthfather) = self.rel_class.get_birth_parents(
self.r_db, self.person)
first = True
for family_handle in parent_list:
family = self.r_db.get_family_from_handle(family_handle)
if family:
# Display this family
self.display_ind_parent_family(birthmother,
birthfather,
family, table, first)
first = False
if self.report.options['showhalfsiblings']:
# Display all families in which the parents are
# involved. This displays half siblings and step
# siblings
self.display_step_families(
family.get_father_handle(), family,
all_family_handles,
birthmother, birthfather, table)
self.display_step_families(
family.get_mother_handle(), family,
all_family_handles,
birthmother, birthfather, table)
return section
def pedigree_person(self, person):
"""
will produce a hyperlink for a pedigree person ...
@param: person -- The person
"""
hyper = self.new_person_link(person.handle, person=person, uplink=True)
return hyper
def pedigree_family(self):
"""
Returns a family pedigree
"""
ped = []
for family_handle in self.person.get_family_handle_list():
rel_family = self.r_db.get_family_from_handle(family_handle)
spouse_handle = utils.find_spouse(self.person, rel_family)
if spouse_handle:
spouse = self.r_db.get_person_from_handle(spouse_handle)
pedsp = (Html("li", class_="spouse") +
self.pedigree_person(spouse)
)
else:
pedsp = (Html("li", class_="spouse"))
ped += [pedsp]
childlist = rel_family.get_child_ref_list()
if childlist:
with Html("ol") as childol:
pedsp += [childol]
for child_ref in childlist:
child = self.r_db.get_person_from_handle(child_ref.ref)
if child:
childol += (Html("li") +
self.pedigree_person(child)
)
return ped
#################################################
#
# creates the Repository List Page and Repository Pages
#
#################################################
class RepositoryPages(BasePage):
"""
This class is responsible for displaying information about the 'Repository'
database objects. It displays this information under the 'Individuals'
tab. It is told by the 'add_instances' call which 'Repository's to display,
and remembers the list of persons. A single call to 'display_pages'
displays both the Individual List (Index) page and all the Individual
pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for this report
"""
BasePage.__init__(self, report, title="")
self.repos_dict = defaultdict(set)
def display_pages(self, title):
"""
Generate and output the pages under the Repository tab, namely the
repository index and the individual repository pages.
@param: title -- Is the title of the web page
"""
LOG.debug("obj_dict[Person]")
for item in self.report.obj_dict[Repository].items():
LOG.debug(" %s", str(item))
# set progress bar pass for Repositories
with self.r_user.progress(_("Narrated Web Site Report"),
_('Creating repository pages'),
len(self.report.obj_dict[Repository]) + 1
) as step:
# Sort the repositories
repos_dict = {}
for repo_handle in self.report.obj_dict[Repository]:
repository = self.r_db.get_repository_from_handle(repo_handle)
key = repository.get_name() + str(repository.get_gramps_id())
repos_dict[key] = (repository, repo_handle)
keys = sorted(repos_dict, key=self.rlocale.sort_key)
# RepositoryListPage Class
self.repositorylistpage(self.report, title, repos_dict, keys)
for index, key in enumerate(keys):
(repo, handle) = repos_dict[key]
step()
self.repositorypage(self.report, title, repo, handle)
def repositorylistpage(self, report, title, repos_dict, keys):
"""
Create Index for repositories
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
@param: repos_dict -- The dictionary for all repositories
@param: keys -- The keys used to access repositories
"""
BasePage.__init__(self, report, title)
#inc_repos = self.report.options["inc_repository"]
output_file, sio = self.report.create_file("repositories")
repolistpage, head, body = self.write_header(_("Repositories"))
ldatec = 0
# begin RepositoryList division
with Html("div", class_="content",
id="RepositoryList") as repositorylist:
body += repositorylist
msg = self._("This page contains an index of "
"all the repositories in the "
"database, sorted by their title. "
"Clicking on a repositories’s "
"title will take you to that repositories’s page.")
repositorylist += Html("p", msg, id="description")
# begin repositories table and table head
with Html("table", class_="infolist primobjlist repolist") as table:
repositorylist += table
thead = Html("thead")
table += thead
trow = Html("tr") + (
Html("th", " ", class_="ColumnRowLabel", inline=True),
Html("th", self._("Type"), class_="ColumnType",
inline=True),
Html("th", self._("Repository |Name"), class_="ColumnName",
inline=True)
)
thead += trow
# begin table body
tbody = Html("tbody")
table += tbody
for index, key in enumerate(keys):
(repo, handle) = repos_dict[key]
trow = Html("tr")
tbody += trow
# index number
trow += Html("td", index + 1, class_="ColumnRowLabel",
inline=True)
# repository type
rtype = self._(repo.type.xml_str())
trow += Html("td", rtype, class_="ColumnType", inline=True)
# repository name and hyperlink
if repo.get_name():
trow += Html("td",
self.repository_link(handle,
repo.get_name(),
repo.get_gramps_id(),
self.uplink),
class_="ColumnName")
ldatec = repo.get_change_time()
else:
trow += Html("td", "[ untitled ]", class_="ColumnName")
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(repolistpage, output_file, sio, ldatec)
def repositorypage(self, report, title, repo, handle):
"""
Create one page for one repository.
@param: report -- The instance of the main report class for this report
@param: title -- Is the title of the web page
@param: repo -- the repository to use
@param: handle -- the handle to use
"""
gid = repo.get_gramps_id()
BasePage.__init__(self, report, title, gid)
ldatec = repo.get_change_time()
output_file, sio = self.report.create_file(handle, 'repo')
self.uplink = True
repositorypage, head, body = self.write_header(_('Repositories'))
# begin RepositoryDetail division and page title
with Html("div", class_="content",
id="RepositoryDetail") as repositorydetail:
body += repositorydetail
# repository name
repositorydetail += Html("h3", html_escape(repo.name), inline=True)
# begin repository table
with Html("table", class_="infolist repolist") as table:
repositorydetail += table
tbody = Html("tbody")
table += tbody
if not self.noid and gid:
trow = Html("tr") + (
Html("td", self._("Gramps ID"),
class_="ColumnAttribute",
inline=True),
Html("td", gid, class_="ColumnValue", inline=True)
)
tbody += trow
trow = Html("tr") + (
Html("td", self._("Type"), class_="ColumnAttribute",
inline=True),
Html("td", self._(repo.get_type().xml_str()),
class_="ColumnValue",
inline=True)
)
tbody += trow
# repository: address(es)...
# repository addresses do NOT have Sources
repo_address = self.display_addr_list(repo.get_address_list(),
False)
if repo_address is not None:
repositorydetail += repo_address
# repository: urllist
urllist = self.display_url_list(repo.get_url_list())
if urllist is not None:
repositorydetail += urllist
# repository: notelist
notelist = self.display_note_list(repo.get_note_list())
if notelist is not None:
repositorydetail += notelist
# display Repository Referenced Sources...
ref_list = self.display_bkref_list(Repository, repo.get_handle())
if ref_list is not None:
repositorydetail += ref_list
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(repositorypage, output_file, sio, ldatec)
class AddressBookListPage(BasePage):
"""
Create the index for addresses.
"""
def __init__(self, report, title, has_url_addr_res):
"""
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
@param: has_url_addr_res -- A list of (sort_name, person_handle,
has_add, has_res, has_url) tuples
"""
BasePage.__init__(self, report, title)
# Name the file, and create it
output_file, sio = self.report.create_file("addressbook")
# Add xml, doctype, meta and stylesheets
addressbooklistpage, head, body = self.write_header(_("Address Book"))
# begin AddressBookList division
with Html("div", class_="content",
id="AddressBookList") as addressbooklist:
body += addressbooklist
# Address Book Page message
msg = _("This page contains an index of all the individuals in "
"the database, sorted by their surname, with one of the "
"following: Address, Residence, or Web Links. "
"Selecting the person’s name will take you "
"to their individual Address Book page.")
addressbooklist += Html("p", msg, id="description")
# begin Address Book table
with Html("table",
class_="infolist primobjlist addressbook") as table:
addressbooklist += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [
[" ", "ColumnRowLabel"],
[_("Full Name"), "ColumnName"],
[_("Address"), "ColumnAddress"],
[_("Residence"), "ColumnResidence"],
[_("Web Links"), "ColumnWebLinks"]
]
)
tbody = Html("tbody")
table += tbody
index = 1
for (sort_name, person_handle,
has_add, has_res,
has_url) in has_url_addr_res:
address = None
residence = None
weblinks = None
# has address but no residence event
if has_add and not has_res:
address = "X"
# has residence, but no addresses
elif has_res and not has_add:
residence = "X"
# has residence and addresses too
elif has_add and has_res:
address = "X"
residence = "X"
# has Web Links
if has_url:
weblinks = "X"
trow = Html("tr")
tbody += trow
trow.extend(
Html("td", data or " ", class_=colclass,
inline=True)
for (colclass, data) in [
["ColumnRowLabel", index],
["ColumnName",
self.addressbook_link(person_handle)],
["ColumnAddress", address],
["ColumnResidence", residence],
["ColumnWebLinks", weblinks]
]
)
index += 1
# Add footer and clearline
footer = self.write_footer(None)
body += (FULLCLEAR, footer)
# send the page out for processing
# and close the file
self.xhtml_writer(addressbooklistpage, output_file, sio, 0)
class AddressBookPage(BasePage):
"""
Create one page for one Address
"""
def __init__(self, report, title, person_handle, has_add, has_res, has_url):
"""
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
@param: person_handle -- The handle of the person for whom the
page is created
@param: has_add -- the address to use for the report
@param: has_res -- the residence to use for the report
@param: has_url -- the url to use for the report
"""
person = report.database.get_person_from_handle(person_handle)
BasePage.__init__(self, report, title, person.gramps_id)
self.bibli = Bibliography()
self.uplink = True
# set the file name and open file
output_file, sio = self.report.create_file(person_handle, "addr")
addressbookpage, head, body = self.write_header(_("Address Book"))
# begin address book page division and section title
with Html("div", class_="content",
id="AddressBookDetail") as addressbookdetail:
body += addressbookdetail
link = self.new_person_link(person_handle, uplink=True,
person=person)
addressbookdetail += Html("h3", link)
# individual has an address
if has_add:
addressbookdetail += self.display_addr_list(has_add, None)
# individual has a residence
if has_res:
addressbookdetail.extend(
self.dump_residence(res)
for res in has_res
)
# individual has a url
if has_url:
addressbookdetail += self.display_url_list(has_url)
# add fullclear for proper styling
# and footer section to page
footer = self.write_footer(None)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(addressbookpage, output_file, sio, 0)
class StatisticsPage(BasePage):
"""
Create one page for statistics
"""
def __init__(self, report, title, step):
"""
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
@param: step -- The step function of the progress meter
"""
import posixpath
BasePage.__init__(self, report, title)
self.bibli = Bibliography()
self.uplink = False
self.report = report
# set the file name and open file
output_file, sio = self.report.create_file("statistics")
statisticspage, head, body = self.write_header(_("Statistics"))
(males,
females,
unknown) = self.get_gender(report.database.iter_person_handles())
mobjects = report.database.get_number_of_media()
npersons = report.database.get_number_of_people()
nfamilies = report.database.get_number_of_families()
nsurnames = len(set(report.database.surname_list))
notfound = []
total_media = 0
mbytes = "0"
chars = 0
for media in report.database.iter_media():
total_media += 1
fullname = media_path_full(report.database, media.get_path())
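# Accumulate the total size in bytes; the megabyte figure below is
# approximated by dropping the last six decimal digits of the byte
# count, i.e. an integer division by 10**6 done on the string form.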
try:
chars += posixpath.getsize(fullname)
length = len(str(chars))
if chars <= 999999:
mbytes = _("less than 1")
else:
mbytes = str(chars)[:(length-6)]
except OSError:
notfound.append(media.get_path())
with Html("div", class_="content", id='EventDetail') as section:
section += Html("h3", self._("Database overview"), inline=True)
body += section
with Html("div", class_="content", id='subsection narrative') as sec11:
sec11 += Html("h4", self._("Individuals"), inline=True)
body += sec11
with Html("div", class_="content", id='subsection narrative') as sec1:
sec1 += Html("br", self._("Number of individuals") + ":" +
"%d" % npersons, inline=True)
sec1 += Html("br", self._("Males") + ":" +
"%d" % males, inline=True)
sec1 += Html("br", self._("Females") + ":" +
"%d" % females, inline=True)
sec1 += Html("br", self._("Individuals with unknown gender") + ":" +
"%d" % unknown, inline=True)
body += sec1
with Html("div", class_="content", id='subsection narrative') as sec2:
sec2 += Html("h4", self._("Family Information"), inline=True)
sec2 += Html("br", self._("Number of families") + ":" +
"%d" % nfamilies, inline=True)
sec2 += Html("br", self._("Unique surnames") + ":" +
"%d" % nsurnames, inline=True)
body += sec2
with Html("div", class_="content", id='subsection narrative') as sec3:
sec3 += Html("h4", self._("Media Objects"), inline=True)
sec3 += Html("br",
self._("Total number of media object references") +
":" + "%d" % total_media, inline=True)
sec3 += Html("br", self._("Number of unique media objects") +
":" + "%d" % mobjects, inline=True)
sec3 += Html("br", self._("Total size of media objects") +
":" + "%8s %s" % (mbytes, self._("Megabyte|MB")),
inline=True)
sec3 += Html("br", self._("Missing Media Objects") +
":" + "%d" % len(notfound), inline=True)
body += sec3
with Html("div", class_="content", id='subsection narrative') as sec4:
sec4 += Html("h4", self._("Miscellaneous"), inline=True)
sec4 += Html("br", self._("Number of events") +
":" + "%d" % report.database.get_number_of_events(),
inline=True)
sec4 += Html("br", self._("Number of places") +
":" + "%d" % report.database.get_number_of_places(),
inline=True)
nsources = report.database.get_number_of_sources()
sec4 += Html("br", self._("Number of sources") +
":" + "%d" % nsources,
inline=True)
ncitations = report.database.get_number_of_citations()
sec4 += Html("br", self._("Number of citations") +
":" + "%d" % ncitations,
inline=True)
nrepo = report.database.get_number_of_repositories()
sec4 += Html("br", self._("Number of repositories") +
":" + "%d" % nrepo,
inline=True)
body += sec4
(males,
females,
unknown) = self.get_gender(self.report.bkref_dict[Person].keys())
center_person = self.report.database.get_person_from_gramps_id(
self.report.options['pid'])
origin = " :<br/>" + report.filter.get_name(self.rlocale)
with Html("div", class_="content", id='EventDetail') as section:
section += Html("h3",
self._("Narrative web content report for") + origin,
inline=True)
body += section
with Html("div", class_="content", id='subsection narrative') as sec5:
sec5 += Html("h4", self._("Individuals"), inline=True)
sec5 += Html("br", self._("Number of individuals") + ":" +
"%d" % len(self.report.bkref_dict[Person]),
inline=True)
sec5 += Html("br", self._("Males") + ":" +
"%d" % males, inline=True)
sec5 += Html("br", self._("Females") + ":" +
"%d" % females, inline=True)
sec5 += Html("br", self._("Individuals with unknown gender") + ":" +
"%d" % unknown, inline=True)
body += sec5
with Html("div", class_="content", id='subsection narrative') as sec6:
sec6 += Html("h4", self._("Family Information"), inline=True)
sec6 += Html("br", self._("Number of families") + ":" +
"%d" % len(self.report.bkref_dict[Family]),
inline=True)
body += sec6
with Html("div", class_="content", id='subsection narrative') as sec7:
sec7 += Html("h4", self._("Miscellaneous"), inline=True)
sec7 += Html("br", self._("Number of events") +
":" + "%d" % len(self.report.bkref_dict[Event]),
inline=True)
sec7 += Html("br", self._("Number of places") +
":" + "%d" % len(self.report.bkref_dict[Place]),
inline=True)
sec7 += Html("br", self._("Number of sources") +
":" + "%d" % len(self.report.bkref_dict[Source]),
inline=True)
sec7 += Html("br", self._("Number of citations") +
":" + "%d" % len(self.report.bkref_dict[Citation]),
inline=True)
sec7 += Html("br", self._("Number of repositories") +
":" + "%d" % len(self.report.bkref_dict[Repository]),
inline=True)
body += sec7
# add fullclear for proper styling
# and footer section to page
footer = self.write_footer(None)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(statisticspage, output_file, sio, 0)
def get_gender(self, person_list):
"""
Return the number of males, females, and individuals of unknown
gender in a person list.
"""
males = 0
females = 0
unknown = 0
for person_handle in person_list:
person = self.report.database.get_person_from_handle(person_handle)
gender = person.get_gender()
if gender == Person.MALE:
males += 1
elif gender == Person.FEMALE:
females += 1
else:
unknown += 1
return (males, females, unknown)
class NavWebReport(Report):
"""
Create WebReport object that produces the report.
"""
def __init__(self, database, options, user):
"""
@param: database -- The GRAMPS database instance
@param: options -- Instance of the Options class for this report
@param: user -- Instance of a gen.user.User()
"""
Report.__init__(self, database, options, user)
self.user = user
menu = options.menu
self.link_prefix_up = True
self.options = {}
for optname in menu.get_all_option_names():
menuopt = menu.get_option_by_name(optname)
self.options[optname] = menuopt.get_value()
lang = self.options['trans']
self.rlocale = self.set_locale(lang)
stdoptions.run_private_data_option(self, menu)
stdoptions.run_living_people_option(self, menu)
self.database = CacheProxyDb(self.database)
self._db = self.database
filters_option = menu.get_option_by_name('filter')
self.filter = filters_option.get_filter()
self.copyright = self.options['cright']
self.target_path = self.options['target']
self.ext = self.options['ext']
self.css = self.options['css']
self.navigation = self.options["navigation"]
self.citationreferents = self.options['citationreferents']
self.title = self.options['title']
self.inc_gallery = self.options['gallery']
self.inc_unused_gallery = self.options['unused']
self.create_thumbs_only = self.options['create_thumbs_only']
self.opts = self.options
self.inc_contact = self.opts['contactnote'] or self.opts['contactimg']
# name format options
self.name_format = self.options['name_format']
# include families or not?
self.inc_families = self.options['inc_families']
# create an event pages or not?
self.inc_events = self.options['inc_events']
# include repository page or not?
self.inc_repository = self.options['inc_repository']
# include GENDEX page or not?
self.inc_gendex = self.options['inc_gendex']
# Download Options Tab
self.inc_download = self.options['incdownload']
self.dl_fname1 = self.options['down_fname1']
self.dl_descr1 = self.options['dl_descr1']
self.dl_fname2 = self.options['down_fname2']
self.dl_descr2 = self.options['dl_descr2']
self.encoding = self.options['encoding']
self.use_archive = self.options['archive']
self.use_intro = self.options['intronote'] or self.options['introimg']
self.use_home = self.options['homenote'] or self.options['homeimg']
self.use_contact = self.opts['contactnote'] or self.opts['contactimg']
# Do we need to include this in a cms ?
self.usecms = self.options['usecms']
self.target_uri = self.options['cmsuri']
# Do we need to include web calendar ?
self.usecal = self.options['usecal']
self.target_cal_uri = self.options['caluri']
# whether to include the ancestor tree graphics or not
self.ancestortree = self.options['ancestortree']
# whether to display children in birth order or in entry order
self.birthorder = self.options['birthorder']
# get option for Internet Address Book
self.inc_addressbook = self.options["inc_addressbook"]
# Place Map tab options
self.placemappages = self.options['placemappages']
self.familymappages = self.options['familymappages']
self.mapservice = self.options['mapservice']
self.googleopts = self.options['googleopts']
self.googlemapkey = self.options['googlemapkey']
if self.use_home:
self.index_fname = "index"
self.surname_fname = "surnames"
self.intro_fname = "introduction"
elif self.use_intro:
self.index_fname = None
self.surname_fname = "surnames"
self.intro_fname = "index"
else:
self.index_fname = None
self.surname_fname = "index"
self.intro_fname = None
self.archive = None
self.cur_fname = None # Internal use. The name of the output file,
# to be used for the tar archive.
self.string_io = None
if self.use_archive:
self.html_dir = None
else:
self.html_dir = self.target_path
self.warn_dir = True # Only give warning once.
self.obj_dict = None
self.visited = None
self.bkref_dict = None
self.rel_class = None
self.tab = None
def write_report(self):
"""
The first method called to write the Narrative Web after loading options
"""
global _WRONGMEDIAPATH
_WRONGMEDIAPATH = []
if not self.use_archive:
dir_name = self.target_path
if dir_name is None:
dir_name = get_curr_dir()
elif not os.path.isdir(dir_name):
parent_dir = os.path.dirname(dir_name)
if not os.path.isdir(parent_dir):
msg = _("Neither %(current)s nor %(parent)s "
"are directories") % {
'current': dir_name, 'parent': parent_dir}
self.user.notify_error(msg)
return
else:
try:
os.mkdir(dir_name)
except OSError as value:
msg = _("Could not create the directory: %s"
) % dir_name + "\n" + value.strerror
self.user.notify_error(msg)
return
except Exception:
msg = _("Could not create the directory: %s") % dir_name
self.user.notify_error(msg)
return
try:
image_dir_name = os.path.join(dir_name, 'images')
if not os.path.isdir(image_dir_name):
os.mkdir(image_dir_name)
image_dir_name = os.path.join(dir_name, 'thumb')
if not os.path.isdir(image_dir_name):
os.mkdir(image_dir_name)
except OSError as value:
msg = _("Could not create the directory: %s"
) % image_dir_name + "\n" + value.strerror
self.user.notify_error(msg)
return
except Exception:
msg = _("Could not create the directory: %s") % image_dir_name
self.user.notify_error(msg)
return
else:
if os.path.isdir(self.target_path):
self.user.notify_error(
_('Invalid file name'),
_('The archive file must be a file, not a directory'))
return
try:
self.archive = tarfile.open(self.target_path, "w:gz")
except (OSError, IOError) as value:
self.user.notify_error(
_("Could not create %s") % self.target_path,
str(value))
return
config.set('paths.website-directory',
os.path.dirname(self.target_path) + os.sep)
if self.usecms:
config.set('paths.website-cms-uri',
os.path.dirname(self.target_uri))
if self.usecal:
config.set('paths.website-cal-uri',
os.path.dirname(self.target_cal_uri))
# for use with discovering biological, half, and step siblings for use
# in display_ind_parents()...
self.rel_class = get_relationship_calculator(reinit=True,
clocale=self.rlocale)
#################################################
#
# Pass 0 Initialise the plug-ins
#
#################################################
# FIXME: The whole of this section of code should be implemented by the
# registration process for the Web Page plugins.
# Note that by use of a dictionary we ensure that at most one Web Page
# plugin is provided for any object class
self.tab = {}
# FIXME: Initialising self.tab in this way means that this code has to
# run before the Web Page registration - I am not sure whether this is
# possible, in which case an alternative approach to providing the
# mapping of object class to Web Page plugin will be needed.
for obj_class in ("Person", "Family", "Source", "Citation", "Place",
"Event", "Media", "Repository"):
# FIXME: Would it be better if the Web Page plugins used a different
# base class rather than BasePage, which is really just for each web
# page
self.tab[obj_class] = BasePage(report=self, title="")
# Note that by not initialising any Web Page plugins that are not going
# to generate pages, we ensure that there is no performance penalty
# for such plugins.
self.tab["Person"] = PersonPages(self)
if self.inc_families:
self.tab["Family"] = FamilyPages(self)
if self.inc_events:
self.tab["Event"] = EventPages(self)
if self.inc_gallery:
self.tab["Media"] = MediaPages(self)
self.tab["Place"] = PlacePages(self)
self.tab["Source"] = SourcePages(self)
self.tab["Repository"] = RepositoryPages(self)
self.tab["Citation"] = CitationPages(self)
# FIXME: The following routines that are not run in two passes have not
# yet been converted to a form suitable for separation into Web Page
# plugins: SurnamePage, SurnameListPage, IntroductionPage, HomePage,
# ThumbnailPreviewPage, DownloadPage, ContactPage, AddressBookListPage,
# AddressBookPage
#################################################
#
# Pass 1 Build the lists of objects to be output
#
#################################################
self._build_obj_dict()
#################################################
#
# Pass 2 Generate the web pages
#
#################################################
self.base_pages()
self.visited = []
# build classes IndividualListPage and IndividualPage
self.tab["Person"].display_pages(self.title)
self.build_gendex(self.obj_dict[Person])
# build classes SurnameListPage and SurnamePage
self.surname_pages(self.obj_dict[Person])
# build classes FamilyListPage and FamilyPage
if self.inc_families:
self.tab["Family"].display_pages(self.title)
# build classes EventListPage and EventPage
if self.inc_events:
self.tab["Event"].display_pages(self.title)
# build classes PlaceListPage and PlacePage
self.tab["Place"].display_pages(self.title)
# build classes RepositoryListPage and RepositoryPage
if self.inc_repository:
self.tab["Repository"].display_pages(self.title)
# build classes MediaListPage and MediaPage
if self.inc_gallery:
if not self.create_thumbs_only:
self.tab["Media"].display_pages(self.title)
# build Thumbnail Preview Page...
self.thumbnail_preview_page()
# build classes AddressBookListPage and AddressBookPage
if self.inc_addressbook:
self.addressbook_pages(self.obj_dict[Person])
# build classes SourceListPage and SourcePage
self.tab["Source"].display_pages(self.title)
# build classes StatisticsPage
self.statistics_preview_page(self.title)
# copy all of the necessary files
self.copy_narrated_files()
# if an archive is being used, close it
if self.archive:
self.archive.close()
if len(_WRONGMEDIAPATH) > 0:
error = '\n'.join([
_('ID=%(grampsid)s, path=%(dir)s') % {
'grampsid' : x[0],
'dir' : x[1]} for x in _WRONGMEDIAPATH[:10]])
if len(_WRONGMEDIAPATH) > 10:
error += '\n ...'
self.user.warn(_("Missing media objects:"), error)
def _build_obj_dict(self):
"""
Construct the dictionaries of objects to be included in the reports.
There are two dictionaries, which have the same structure: they are
two-level dictionaries; the first key is the class of object
(e.g. gen.lib.Person).
The second key is the handle of the object.
For the obj_dict, the value is a tuple containing the gramps_id,
the text name for the object, and the file name for the display.
For the bkref_dict, the value is a tuple containing the class of object
and the handle for the object that refers to the 'key' object.
"""
_obj_class_list = (Person, Family, Event, Place, Source, Citation,
Media, Repository, Note, Tag)
# setup a dictionary of the required structure
self.obj_dict = defaultdict(lambda: defaultdict(set))
self.bkref_dict = defaultdict(lambda: defaultdict(set))
# initialise the dictionary to empty in case no objects of any
# particular class are included in the web report
for obj_class in _obj_class_list:
self.obj_dict[obj_class] = defaultdict(set)
ind_list = self._db.iter_person_handles()
ind_list = self.filter.apply(self._db, ind_list, user=self.user)
with self.user.progress(_("Narrated Web Site Report"),
_('Constructing list of other objects...'),
sum(1 for _ in ind_list)) as step:
for handle in ind_list:
# FIXME work around bug that self.database.iter under python 3
# returns (binary) data rather than text
if not isinstance(handle, str):
handle = handle.decode('utf-8')
step()
self._add_person(handle, "", "")
LOG.debug("final object dictionary \n" +
"".join(("%s: %s\n" % item)
for item in self.obj_dict.items()))
LOG.debug("final backref dictionary \n" +
"".join(("%s: %s\n" % item)
for item in self.bkref_dict.items()))
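# A minimal sketch of the structures built above, using a hypothetical
# person handle 'a1b2c3d4' and Gramps ID 'I0001':
#
#   obj_dict[Person]['a1b2c3d4'] = ('ppl/4/d/a1b2c3d4.html',
#                                   'Doe, John', 'I0001')
#   bkref_dict[Citation]['c0ffee'] = {(Person, 'a1b2c3d4')}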
def _add_person(self, person_handle, bkref_class, bkref_handle):
"""
Add person_handle to the obj_dict, and recursively all referenced
objects
@param: person_handle -- The handle for the person to add
@param: bkref_class -- The class associated to this handle (person)
@param: bkref_handle -- The handle associated to this person
"""
person = self._db.get_person_from_handle(person_handle)
if person:
person_name = self.get_person_name(person)
person_fname = self.build_url_fname(person_handle, "ppl",
False) + self.ext
self.obj_dict[Person][person_handle] = (person_fname, person_name,
person.gramps_id)
self.bkref_dict[Person][person_handle].add((bkref_class,
bkref_handle))
############### Header section ##############
for citation_handle in person.get_citation_list():
self._add_citation(citation_handle, Person, person_handle)
############### Name section ##############
for name in [person.get_primary_name()
] + person.get_alternate_names():
for citation_handle in name.get_citation_list():
self._add_citation(citation_handle, Person, person_handle)
############### Events section ##############
# Now tell the events tab to display the individual events
evt_ref_list = person.get_event_ref_list()
if evt_ref_list:
for evt_ref in evt_ref_list:
event = self._db.get_event_from_handle(evt_ref.ref)
if event:
self._add_event(evt_ref.ref, Person, person_handle)
place_handle = event.get_place_handle()
if place_handle:
self._add_place(place_handle, Person,
person_handle, event)
# If event pages are not being output, then tell the
# media tab to display the person's event media. If
# events are being displayed, then the media are linked
# from the event tab
if not self.inc_events:
for media_ref in event.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Person,
person_handle)
for citation_handle in event.get_citation_list():
self._add_citation(citation_handle, Person,
person_handle)
############### Families section ##############
# Tell the families tab to display this individual's families
family_handle_list = person.get_family_handle_list()
if family_handle_list:
for family_handle in family_handle_list:
self._add_family(family_handle, Person, person_handle)
# Tell the events tab to display the family events which
# are referenced from the individual page.
family = self._db.get_family_from_handle(family_handle)
if family:
family_evt_ref_list = family.get_event_ref_list()
if family_evt_ref_list:
for evt_ref in family_evt_ref_list:
event = self._db.get_event_from_handle(
evt_ref.ref)
if event:
self._add_event(evt_ref.ref, Person,
person_handle)
place_handle = event.get_place_handle()
if place_handle:
self._add_place(place_handle, Person,
person_handle, event)
for cite_hdl in event.get_citation_list():
self._add_citation(cite_hdl, Person,
person_handle)
# add the family media and the family event media if the
# families page is not being displayed (If it is displayed,
# the media are linked from the families page)
if not self.inc_families:
for m_ref in event.get_media_list():
m_hdl = m_ref.get_reference_handle()
self._add_media(m_hdl, Person,
person_handle)
for lds_ord in family.get_lds_ord_list():
for citation_handle in lds_ord.get_citation_list():
self._add_citation(citation_handle,
Person, person_handle)
for attr in family.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle,
Person, person_handle)
if not self.inc_families:
for media_ref in family.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Person,
person_handle)
############### LDS Ordinance section ##############
for lds_ord in person.get_lds_ord_list():
for citation_handle in lds_ord.get_citation_list():
self._add_citation(citation_handle, Person, person_handle)
############### Attribute section ##############
for attr in person.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle, Person, person_handle)
############### Address section ##############
for addr in person.get_address_list():
for addr_handle in addr.get_citation_list():
self._add_citation(addr_handle, Person, person_handle)
############### Media section ##############
# Now tell the Media tab which media objects to display
# First the person's media objects
for media_ref in person.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Person, person_handle)
def get_person_name(self, person):
"""
Return a string containing the person's primary name in the name
format chosen in the web report options
@param: person -- person object from database
"""
name_format = self.options['name_format']
primary_name = person.get_primary_name()
name = Name(primary_name)
name.set_display_as(name_format)
return _nd.display_name(name)
def _add_family(self, family_handle, bkref_class, bkref_handle):
"""
Add family to the Family object list
@param: family_handle -- The handle for the family to add
@param: bkref_class -- The class associated to this handle (family)
@param: bkref_handle -- The handle associated to this family
"""
family = self._db.get_family_from_handle(family_handle)
family_name = self.get_family_name(family)
if self.inc_families:
family_fname = self.build_url_fname(family_handle, "fam",
False) + self.ext
else:
family_fname = ""
self.obj_dict[Family][family_handle] = (family_fname, family_name,
family.gramps_id)
self.bkref_dict[Family][family_handle].add((bkref_class, bkref_handle))
if self.inc_gallery:
for media_ref in family.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Family, family_handle)
############### Events section ##############
for evt_ref in family.get_event_ref_list():
event = self._db.get_event_from_handle(evt_ref.ref)
place_handle = event.get_place_handle()
if place_handle:
self._add_place(place_handle, Family, family_handle, event)
if self.inc_events:
# detail for family events are displayed on the events pages as
# well as on this family page
self._add_event(evt_ref.ref, Family, family_handle)
else:
# There is no event page. Family events are displayed on the
# family page, but the associated family event media may need to
# be displayed on the media page
if self.inc_gallery:
for media_ref in event.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Family, family_handle)
############### LDS Ordinance section ##############
for lds_ord in family.get_lds_ord_list():
for citation_handle in lds_ord.get_citation_list():
self._add_citation(citation_handle, Family, family_handle)
############### Attributes section ##############
for attr in family.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle, Family, family_handle)
############### Sources section ##############
for citation_handle in family.get_citation_list():
self._add_citation(citation_handle, Family, family_handle)
def get_family_name(self, family):
"""
Return a string containing the name of the family (e.g. 'Family of John
Doe and Jane Doe')
@param: family -- family object from database
"""
husband_handle = family.get_father_handle()
spouse_handle = family.get_mother_handle()
if husband_handle:
husband = self._db.get_person_from_handle(husband_handle)
else:
husband = None
if spouse_handle:
spouse = self._db.get_person_from_handle(spouse_handle)
else:
spouse = None
if husband and spouse:
husband_name = self.get_person_name(husband)
spouse_name = self.get_person_name(spouse)
title_str = self._("Family of %(husband)s and %(spouse)s"
) % {'husband' : husband_name,
'spouse' : spouse_name}
elif husband:
husband_name = self.get_person_name(husband)
# Only the name of the husband is known
title_str = self._("Family of %s") % husband_name
elif spouse:
spouse_name = self.get_person_name(spouse)
# Only the name of the wife is known
title_str = self._("Family of %s") % spouse_name
else:
title_str = ''
return title_str
def _add_event(self, event_handle, bkref_class, bkref_handle):
"""
Add event to the Event object list
@param: event_handle -- The handle for the event to add
@param: bkref_class -- The class associated to this handle (event)
@param: bkref_handle -- The handle associated to this event
"""
event = self._db.get_event_from_handle(event_handle)
event_name = event.get_description()
# The event description can be Y on import from GEDCOM. See the
# following quote from the GEDCOM spec: "The occurrence of an event is
# asserted by the presence of either a DATE tag and value or a PLACe tag
# and value in the event structure. When neither the date value nor the
# place value are known then a Y(es) value on the parent event tag line
# is required to assert that the event happened.""
if event_name == "" or event_name is None or event_name == 'Y':
event_name = str(event.get_type())
# begin add generated descriptions to media pages
# (request 7074 : acrider)
ref_name = ""
for reference in self._db.find_backlink_handles(event_handle):
ref_class, ref_handle = reference
if ref_class == 'Person':
person = self._db.get_person_from_handle(ref_handle)
ref_name = self.get_person_name(person)
elif ref_class == 'Family':
family = self._db.get_family_from_handle(ref_handle)
ref_name = self.get_family_name(family)
if ref_name != "":
event_name += ", " + ref_name
# end descriptions to media pages
if self.inc_events:
event_fname = self.build_url_fname(event_handle, "evt",
False) + self.ext
else:
event_fname = ""
self.obj_dict[Event][event_handle] = (event_fname, event_name,
event.gramps_id)
self.bkref_dict[Event][event_handle].add((bkref_class, bkref_handle))
############### Attribute section ##############
for attr in event.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle, Event, event_handle)
############### Source section ##############
for citation_handle in event.get_citation_list():
self._add_citation(citation_handle, Event, event_handle)
############### Media section ##############
if self.inc_gallery:
for media_ref in event.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Event, event_handle)
def _add_place(self, place_handle, bkref_class, bkref_handle, event):
"""
Add place to the Place object list
@param: place_handle -- The handle for the place to add
@param: bkref_class -- The class associated to this handle (place)
@param: bkref_handle -- The handle associated to this place
"""
place = self._db.get_place_from_handle(place_handle)
if place is None:
return
if config.get('preferences.place-auto'):
place_name = _pd.display_event(self._db, event)
else:
place_name = place.get_title()
place_fname = self.build_url_fname(place_handle, "plc",
False) + self.ext
self.obj_dict[Place][place_handle] = (place_fname, place_name,
place.gramps_id, event)
self.bkref_dict[Place][place_handle].add((bkref_class, bkref_handle))
############### Media section ##############
if self.inc_gallery:
for media_ref in place.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Place, place_handle)
############### Sources section ##############
for citation_handle in place.get_citation_list():
self._add_citation(citation_handle, Place, place_handle)
def _add_source(self, source_handle, bkref_class, bkref_handle):
"""
Add source to the Source object list
@param: source_handle -- The handle for the source to add
@param: bkref_class -- The class associated to this handle (source)
@param: bkref_handle -- The handle associated to this source
"""
source = self._db.get_source_from_handle(source_handle)
source_name = source.get_title()
source_fname = self.build_url_fname(source_handle, "src",
False) + self.ext
self.obj_dict[Source][source_handle] = (source_fname, source_name,
source.gramps_id)
self.bkref_dict[Source][source_handle].add((bkref_class, bkref_handle))
############### Media section ##############
if self.inc_gallery:
for media_ref in source.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Source, source_handle)
############### Repository section ##############
if self.inc_repository:
for repo_ref in source.get_reporef_list():
repo_handle = repo_ref.get_reference_handle()
self._add_repository(repo_handle, Source, source_handle)
def _add_citation(self, citation_handle, bkref_class, bkref_handle):
"""
Add citation to the Citation object list
@param: citation_handle -- The handle for the citation to add
@param: bkref_class -- The class associated to this handle
@param: bkref_handle -- The handle associated to this citation
"""
citation = self._db.get_citation_from_handle(citation_handle)
# If the citation page is None, fall back to "" so that a tuple is
# still generated for the source backreference
citation_name = citation.get_page() or ""
source_handle = citation.get_reference_handle()
self.obj_dict[Citation][citation_handle] = ("", citation_name,
citation.gramps_id)
self.bkref_dict[Citation][citation_handle].add((bkref_class,
bkref_handle))
############### Source section ##############
self._add_source(source_handle, Citation, citation_handle)
############### Media section ##############
if self.inc_gallery:
for media_ref in citation.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Citation, citation_handle)
def _add_media(self, media_handle, bkref_class, bkref_handle):
"""
Add media to the Media object list
@param: media_handle -- The handle for the media to add
@param: bkref_class -- The class associated to this handle (media)
@param: bkref_handle -- The handle associated to this media
"""
media_refs = self.bkref_dict[Media].get(media_handle)
if media_refs and (bkref_class, bkref_handle) in media_refs:
return
media = self._db.get_media_from_handle(media_handle)
# use media title (request 7074 acrider)
media_name = media.get_description()
if media_name is None or media_name == "":
media_name = "Media"
#end media title
if self.inc_gallery:
media_fname = self.build_url_fname(media_handle, "img",
False) + self.ext
else:
media_fname = ""
self.obj_dict[Media][media_handle] = (media_fname, media_name,
media.gramps_id)
self.bkref_dict[Media][media_handle].add((bkref_class, bkref_handle))
############### Attribute section ##############
for attr in media.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle, Media, media_handle)
############### Sources section ##############
for citation_handle in media.get_citation_list():
self._add_citation(citation_handle, Media, media_handle)
def _add_repository(self, repos_handle, bkref_class, bkref_handle):
"""
Add repository to the Repository object list
@param: repos_handle -- The handle for the repository to add
@param: bkref_class -- The class associated to this handle (source)
@param: bkref_handle -- The handle associated to this source
"""
repos = self._db.get_repository_from_handle(repos_handle)
repos_name = repos.name
if self.inc_repository:
repos_fname = self.build_url_fname(repos_handle, "repo",
False) + self.ext
else:
repos_fname = ""
self.obj_dict[Repository][repos_handle] = (repos_fname, repos_name,
repos.gramps_id)
self.bkref_dict[Repository][repos_handle].add((bkref_class,
bkref_handle))
def copy_narrated_files(self):
"""
Copy all of the CSS, image, and javascript files for Narrated Web
"""
imgs = []
# copy screen style sheet
if CSS[self.css]["filename"]:
fname = CSS[self.css]["filename"]
self.copy_file(fname, _NARRATIVESCREEN, "css")
# copy printer style sheet
fname = CSS["Print-Default"]["filename"]
self.copy_file(fname, _NARRATIVEPRINT, "css")
# copy ancestor tree style sheet if tree is being created?
if self.ancestortree:
fname = CSS["ancestortree"]["filename"]
self.copy_file(fname, "ancestortree.css", "css")
# copy behaviour style sheet
fname = CSS["behaviour"]["filename"]
self.copy_file(fname, "behaviour.css", "css")
# copy Menu Layout Style Sheet if Blue or Visually is being
# used as the stylesheet?
if CSS[self.css]["navigation"]:
if self.navigation == "Horizontal":
fname = CSS["Horizontal-Menus"]["filename"]
elif self.navigation == "Vertical":
fname = CSS["Vertical-Menus"]["filename"]
elif self.navigation == "Fade":
fname = CSS["Fade-Menus"]["filename"]
elif self.navigation == "dropdown":
fname = CSS["DropDown-Menus"]["filename"]
self.copy_file(fname, "narrative-menus.css", "css")
# copy narrative-maps Style Sheet if Place or Family Map pages
# are being created?
if self.placemappages or self.familymappages:
fname = CSS["NarrativeMaps"]["filename"]
self.copy_file(fname, "narrative-maps.css", "css")
# Copy the Creative Commons icon if the Creative Commons
# license is requested
if 0 < self.copyright <= len(_CC):
imgs += [CSS["Copyright"]["filename"]]
# copy Gramps favorite icon #2
imgs += [CSS["favicon2"]["filename"]]
# we need the blank image gif needed by behaviour.css
# add the document.png file for media other than photos
imgs += CSS["All Images"]["images"]
# copy Ancestor Tree graphics if needed???
if self.ancestortree:
imgs += CSS["ancestortree"]["images"]
# Anything css-specific:
imgs += CSS[self.css]["images"]
# copy all to images subdir:
for from_path in imgs:
fdir, fname = os.path.split(from_path)
self.copy_file(from_path, fname, "images")
# copy GRAMPS marker icon for openstreetmap
fname = CSS["marker"]["filename"]
self.copy_file(fname, "marker.png", "images")
def build_gendex(self, ind_list):
"""
Create a gendex file
@param: ind_list -- The list of persons to use
"""
if self.inc_gendex:
with self.user.progress(_("Narrated Web Site Report"),
_('Creating GENDEX file'),
len(ind_list)) as step:
fp_gendex, gendex_io = self.create_file("gendex", ext=".txt")
date = 0
for person_handle in ind_list:
step()
person = self._db.get_person_from_handle(person_handle)
datex = person.get_change_time()
if datex > date:
date = datex
if self.archive:
self.write_gendex(gendex_io, person)
else:
self.write_gendex(fp_gendex, person)
self.close_file(fp_gendex, gendex_io, date)
def write_gendex(self, filep, person):
"""
Write one GENDEX record for a person, in the format:
Reference|SURNAME|given name /SURNAME/|date of birth|place of birth|
date of death|place of death|
* field 1: file name of web page referring to the individual
* field 2: surname of the individual
* field 3: full name of the individual
* field 4: date of birth or christening (optional)
* field 5: place of birth or christening (optional)
* field 6: date of death or burial (optional)
* field 7: place of death or burial (optional)
@param: filep -- The gendex output file name
@param: person -- The person to use for gendex file
"""
url = self.build_url_fname_html(person.handle, "ppl")
surname = person.get_primary_name().get_surname()
fullname = person.get_primary_name().get_gedcom_name()
# get birth info:
dob, pob = get_gendex_data(self._db, person.get_birth_ref())
# get death info:
dod, pod = get_gendex_data(self._db, person.get_death_ref())
filep.write(
'|'.join((url, surname, fullname, dob, pob, dod, pod)) + '|\n')
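# A hypothetical GENDEX record as written above, with every optional
# field present (fields are separated and terminated by '|'):
#
#   ppl/4/d/a1b2c3d4.html|Doe|John /Doe/|1 JAN 1900|London|2 FEB 1980|Paris|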
def surname_pages(self, ind_list):
"""
Generate the surname-related pages from a list of individual
people.
@param: ind_list -- The list of persons to use
"""
local_list = sort_people(self._db, ind_list, self.rlocale)
with self.user.progress(_("Narrated Web Site Report"),
_("Creating surname pages"),
len(local_list)) as step:
SurnameListPage(self, self.title, ind_list,
SurnameListPage.ORDER_BY_NAME,
self.surname_fname)
SurnameListPage(self, self.title, ind_list,
SurnameListPage.ORDER_BY_COUNT,
"surnames_count")
for (surname, handle_list) in local_list:
SurnamePage(self, self.title, surname, sorted(handle_list))
step()
def thumbnail_preview_page(self):
"""
creates the thumbnail preview page
"""
with self.user.progress(_("Narrated Web Site Report"),
_("Creating thumbnail preview page..."),
len(self.obj_dict[Media])) as step:
ThumbnailPreviewPage(self, self.title, step)
def statistics_preview_page(self, title):
"""
creates the statistics preview page
"""
with self.user.progress(_("Narrated Web Site Report"),
_("Creating statistics page..."),
len(self.obj_dict[Media])) as step:
StatisticsPage(self, title, step)
def addressbook_pages(self, ind_list):
"""
Create a webpage with a list of address availability for each person
and the associated individual address pages.
@param: ind_list -- The list of persons to use
"""
url_addr_res = []
for person_handle in ind_list:
person = self._db.get_person_from_handle(person_handle)
addrlist = person.get_address_list()
evt_ref_list = person.get_event_ref_list()
urllist = person.get_url_list()
add = addrlist or None
url = urllist or None
res = []
for event_ref in evt_ref_list:
event = self._db.get_event_from_handle(event_ref.ref)
if event.get_type() == EventType.RESIDENCE:
res.append(event)
if add or res or url:
primary_name = person.get_primary_name()
sort_name = ''.join([primary_name.get_surname(), ", ",
primary_name.get_first_name()])
url_addr_res.append((sort_name, person_handle, add, res, url))
url_addr_res.sort()
AddressBookListPage(self, self.title, url_addr_res)
# begin Address Book pages
addr_size = len(url_addr_res)
with self.user.progress(_("Narrated Web Site Report"),
_("Creating address book pages ..."),
addr_size) as step:
for (sort_name, person_handle, add, res, url) in url_addr_res:
AddressBookPage(self, self.title, person_handle, add, res, url)
step()
def base_pages(self):
"""
creates HomePage, ContactPage, DownloadPage, and IntroductionPage
if requested by options in plugin
"""
if self.use_home:
HomePage(self, self.title)
if self.inc_contact:
ContactPage(self, self.title)
if self.inc_download:
DownloadPage(self, self.title)
if self.use_intro:
IntroductionPage(self, self.title)
def build_subdirs(self, subdir, fname, uplink=False):
"""
If subdir is given, then two extra levels of subdirectory are inserted
between 'subdir' and the filename. The reason is to prevent directories
with too many entries.
For example, this may return "8/1/aec934857df74d36618"
@param: subdir -- The subdirectory name to use
@param: fname -- The file name for which we need to build the path
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
If uplink is None, then "./" is prepended instead
(for use in EventListPage).
"""
subdirs = []
if subdir:
subdirs.append(subdir)
subdirs.append(fname[-1].lower())
subdirs.append(fname[-2].lower())
if self.usecms:
if self.target_uri not in subdirs:
subdirs = [self.target_uri] + subdirs
else:
if uplink is True:
subdirs = ['..']*3 + subdirs
# added for use in EventListPage
elif uplink is None:
subdirs = ['.'] + subdirs
return subdirs
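# For example (hypothetical handle, usecms False):
#   build_subdirs("ppl", "a1b2c3d4", uplink=True)
# returns ['..', '..', '..', 'ppl', '4', 'd'] -- the last two characters
# of the file name, reversed, become the two extra directory levels.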
def build_path(self, subdir, fname, uplink=False):
"""
Return the name of the subdirectory.
Notice that we DO use os.path.join() here.
@param: subdir -- The subdirectory name to use
@param: fname -- The file name for which we need to build the path
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
"""
return os.path.join(*self.build_subdirs(subdir, fname, uplink))
def build_url_image(self, fname, subdir=None, uplink=False):
"""
builds a url for an image
@param: fname -- The file name for which we need to build the path
@param: subdir -- The subdirectory name to use
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
"""
subdirs = []
if subdir:
subdirs.append(subdir)
if self.usecms:
if self.target_uri not in subdirs:
subdirs = [self.target_uri] + subdirs
else:
if uplink:
subdirs = ['..']*3 + subdirs
nname = "/".join(subdirs + [fname])
if win():
nname = nname.replace('\\', "/")
return nname
def build_url_fname_html(self, fname, subdir=None, uplink=False):
"""
builds a url filename and appends the html file extension
@param: fname -- The file name to create
@param: subdir -- The subdirectory name to use
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
"""
return self.build_url_fname(fname, subdir, uplink) + self.ext
def build_link(self, prop, handle, obj_class):
"""
Build a link to an item.
@param: prop -- Property
@param: handle -- The handle for which we need to build a link
@param: obj_class -- The class of the related object.
"""
if prop == "gramps_id":
if obj_class in self._db.get_table_names():
obj = self._db.get_table_metadata(obj_class)[
"gramps_id_func"](handle)
if obj:
handle = obj.handle
else:
raise AttributeError("gramps_id '%s' not found in '%s'" %
handle, obj_class)
else:
raise AttributeError("invalid gramps_id lookup "
"in table name '%s'" % obj_class)
uplink = self.link_prefix_up
# handle, ppl
if obj_class == "Person":
if self.person_in_webreport(handle):
return self.build_url_fname(handle, "ppl", uplink) + self.ext
else:
return None
elif obj_class == "Source":
subdir = "src"
elif obj_class == "Place":
subdir = "plc"
elif obj_class == "Event":
subdir = "evt"
elif obj_class == "Media":
subdir = "img"
elif obj_class == "Repository":
subdir = "repo"
elif obj_class == "Family":
subdir = "fam"
else:
print("NarrativeWeb ignoring link type '%s'" % obj_class)
return None
return self.build_url_fname(handle, subdir, uplink) + self.ext
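# Usage sketch (hypothetical handle, link_prefix_up True, self.ext
# '.html'):
#   build_link("handle", "a1b2c3d4", "Source")
# returns '../../../src/4/d/a1b2c3d4.html'.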
def build_url_fname(self, fname, subdir=None, uplink=False):
"""
Create part of the URL given the filename and optionally the
subdirectory. If the subdirectory is given, then two extra levels of
subdirectory are inserted between 'subdir' and the filename.
The reason is to prevent directories with too many entries.
@param: fname -- The file name to create
@param: subdir -- The subdirectory name to use
@param: uplink -- if True, then "../../../" is inserted in front of the
result.
The extension is not added here; see build_url_fname_html().
Notice that we do NOT use os.path.join() because we're creating a URL.
Imagine we run gramps on Windows (heaven forbid): we don't want to
see backslashes in the URL.
"""
if not fname:
return ""
if win():
fname = fname.replace('\\', "/")
fname = fname.replace(self.target_uri + "/", "")
if self.usecms:
subdirs = self.build_subdirs(subdir, fname, False)
else:
subdirs = self.build_subdirs(subdir, fname, uplink)
return "/".join(subdirs + [fname])
def create_file(self, fname, subdir=None, ext=None):
"""
Create the given file and return (output_file, string_io)
@param: fname -- File name to be created
@param: subdir -- A subdir to be added to filename
@param: ext -- An extension to be added to filename
"""
if ext is None:
ext = self.ext
if self.usecms and subdir is None:
self.cur_fname = os.path.join(self.target_uri, fname) + ext
else:
if subdir:
subdir = self.build_path(subdir, fname)
self.cur_fname = os.path.join(subdir, fname) + ext
else:
self.cur_fname = fname + ext
if self.archive:
string_io = BytesIO()
output_file = TextIOWrapper(string_io, encoding=self.encoding,
errors='xmlcharrefreplace')
else:
string_io = None
if subdir:
subdir = os.path.join(self.html_dir, subdir)
if not os.path.isdir(subdir):
os.makedirs(subdir)
fname = os.path.join(self.html_dir, self.cur_fname)
output_file = open(fname, 'w', encoding=self.encoding,
errors='xmlcharrefreplace')
return (output_file, string_io)
def close_file(self, output_file, string_io, date):
"""
Close the file passed to it, adding it to the archive if one is in use
@param: output_file -- The output file to flush
@param: string_io -- The string IO used when we are in archive mode
@param: date -- The last modification date for this object
If we have "zero", we use the current time.
This is related to bug 8950 and very useful
when we use rsync.
"""
if self.archive:
output_file.flush()
tarinfo = tarfile.TarInfo(self.cur_fname)
tarinfo.size = len(string_io.getvalue())
tarinfo.mtime = date if date != 0 else time.time()
if not win():
tarinfo.uid = os.getuid()
tarinfo.gid = os.getgid()
string_io.seek(0)
self.archive.addfile(tarinfo, string_io)
output_file.close()
else:
output_file.close()
if date > 0:
os.utime(output_file.name, (date, date))
def prepare_copy_media(self, photo):
"""
prepares a media object to copy
@param: photo -- The photo for which we need a real path
and a thumbnail path
"""
handle = photo.get_handle()
ext = os.path.splitext(photo.get_path())[1]
real_path = os.path.join(self.build_path('images', handle),
handle + ext)
thumb_path = os.path.join(self.build_path('thumb', handle),
handle + '.png')
return real_path, thumb_path
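# For a hypothetical photo with handle 'a1b2c3d4' and a '.jpg' source
# file, this yields real_path 'images/4/d/a1b2c3d4.jpg' and thumb_path
# 'thumb/4/d/a1b2c3d4.png'.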
def copy_file(self, from_fname, to_fname, to_dir=''):
"""
Copy a file from a source to a (report) destination.
If to_dir is not present and if the target is not an archive,
then the destination directory will be created.
@param: from_fname -- The path of the file to copy.
@param: to_fname -- Will be just a filename, without directory path.
@param: to_dir -- Is the relative path name in the destination root.
It will be prepended before 'to_fname'.
"""
if self.usecms:
to_dir = "/" + self.target_uri + "/" + to_dir
# LOG.debug("copying '%s' to '%s/%s'" % (from_fname, to_dir, to_fname))
mtime = os.stat(from_fname).st_mtime
if self.archive:
def set_mtime(tarinfo):
"""
For each file, we set the last modification time.
We could also set uid, gid, uname, gname and mode
#tarinfo.uid = os.getuid()
#tarinfo.mode = 0660
#tarinfo.uname = tarinfo.gname = "www-data"
"""
tarinfo.mtime = mtime
return tarinfo
dest = os.path.join(to_dir, to_fname)
self.archive.add(from_fname, dest, filter=set_mtime)
else:
dest = os.path.join(self.html_dir, to_dir, to_fname)
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
os.makedirs(destdir)
if from_fname != dest:
try:
shutil.copyfile(from_fname, dest)
os.utime(dest, (mtime, mtime))
except (IOError, OSError) as err:
print("Copying error: %s" % err)
print("Continuing...")
elif self.warn_dir:
self.user.warn(
_("Possible destination error") + "\n" +
_("You appear to have set your target directory "
"to a directory used for data storage. This "
"could create problems with file management. "
"It is recommended that you consider using "
"a different directory to store your generated "
"web pages."))
self.warn_dir = False
def person_in_webreport(self, person_handle):
"""
Return the handle if we created a page for this person.
@param: person_handle -- The person we are looking for
"""
return person_handle in self.obj_dict[Person]
#################################################
#
# Creates the NarrativeWeb Report Menu Options
#
#################################################
class NavWebOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
"""
@param: name -- The name of the report
@param: dbase -- The Gramps database instance
"""
self.__db = dbase
self.__archive = None
self.__target = None
self.__target_uri = None
self.__pid = None
self.__filter = None
self.__graph = None
self.__graphgens = None
self.__living = None
self.__yearsafterdeath = None
self.__usecms = None
self.__cms_uri = None
self.__usecal = None
self.__calendar_uri = None
self.__create_thumbs_only = None
self.__mapservice = None
self.__maxinitialimageheight = None
self.__maxinitialimagewidth = None
self.__citationreferents = None
self.__incdownload = None
self.__placemappages = None
self.__familymappages = None
self.__googleopts = None
self.__googlemapkey = None
self.__ancestortree = None
self.__css = None
self.__dl_descr1 = None
self.__dl_descr2 = None
self.__down_fname2 = None
self.__gallery = None
self.__unused = None
self.__down_fname1 = None
self.__navigation = None
self.__target_cal_uri = None
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
"""
Add options to the menu for the web site.
@param: menu -- The menu for which we add options
"""
self.__add_report_options(menu)
self.__add_report_options_2(menu)
self.__add_page_generation_options(menu)
self.__add_images_generation_options(menu)
self.__add_privacy_options(menu)
self.__add_download_options(menu)
self.__add_advanced_options(menu)
self.__add_advanced_options_2(menu)
self.__add_place_map_options(menu)
self.__add_others_options(menu)
def __add_report_options(self, menu):
"""
Options on the "Report Options" tab.
"""
category_name = _("Report Options")
addopt = partial(menu.add_option, category_name)
self.__archive = BooleanOption(_('Store web pages in .tar.gz archive'),
False)
self.__archive.set_help(_('Whether to store the web pages in an '
'archive file'))
addopt("archive", self.__archive)
self.__archive.connect('value-changed', self.__archive_changed)
dbname = self.__db.get_dbname()
default_dir = dbname + "_" + "NAVWEB"
self.__target = DestinationOption(
_("Destination"),
os.path.join(config.get('paths.website-directory'),
default_dir))
self.__target.set_help(_("The destination directory for the web "
"files"))
addopt("target", self.__target)
self.__archive_changed()
title = StringOption(_("Web site title"), _('My Family Tree'))
title.set_help(_("The title of the web site"))
addopt("title", title)
self.__filter = FilterOption(_("Filter"), 0)
self.__filter.set_help(
_("Select filter to restrict people that appear on web site"))
addopt("filter", self.__filter)
self.__filter.connect('value-changed', self.__filter_changed)
self.__pid = PersonOption(_("Filter Person"))
self.__pid.set_help(_("The center person for the filter"))
addopt("pid", self.__pid)
self.__pid.connect('value-changed', self.__update_filters)
self.__update_filters()
stdoptions.add_name_format_option(menu, category_name)
stdoptions.add_localization_option(menu, category_name)
def __add_report_options_2(self, menu):
"""
Continue Options on the "Report Options" tab.
"""
category_name = _("Report Options (2)")
        addopt = partial(menu.add_option, category_name)
ext = EnumeratedListOption(_("File extension"), ".html")
for etype in _WEB_EXT:
ext.add_item(etype, etype)
ext.set_help(_("The extension to be used for the web files"))
addopt("ext", ext)
cright = EnumeratedListOption(_('Copyright'), 0)
for index, copt in enumerate(_COPY_OPTIONS):
cright.add_item(index, copt)
cright.set_help(_("The copyright to be used for the web files"))
addopt("cright", cright)
self.__css = EnumeratedListOption(_('StyleSheet'), CSS["default"]["id"])
for (fname, gid) in sorted([(CSS[key]["translation"], CSS[key]["id"])
for key in list(CSS.keys())]):
if CSS[gid]["user"]:
self.__css.add_item(CSS[gid]["id"], CSS[gid]["translation"])
self.__css.set_help(_('The stylesheet to be used for the web pages'))
addopt("css", self.__css)
self.__css.connect("value-changed", self.__stylesheet_changed)
_nav_opts = [
(_("Horizontal -- Default"), "Horizontal"),
(_("Vertical -- Left Side"), "Vertical"),
(_("Fade -- WebKit Browsers Only"), "Fade"),
(_("Drop-Down -- WebKit Browsers Only"), "dropdown")
]
self.__navigation = EnumeratedListOption(_("Navigation Menu Layout"),
_nav_opts[0][1])
for layout in _nav_opts:
self.__navigation.add_item(layout[1], layout[0])
self.__navigation.set_help(_("Choose which layout "
"for the Navigation Menus."))
addopt("navigation", self.__navigation)
self.__stylesheet_changed()
_cit_opts = [
(_("Normal Outline Style"), "Outline"),
(_("Drop-Down -- WebKit Browsers Only"), "DropDown")
]
self.__citationreferents = EnumeratedListOption(
_("Citation Referents Layout"), _cit_opts[0][1])
for layout in _cit_opts:
self.__citationreferents.add_item(layout[1], layout[0])
self.__citationreferents.set_help(
_("Determine the default layout for the "
"Source Page's Citation Referents section"))
addopt("citationreferents", self.__citationreferents)
self.__ancestortree = BooleanOption(_("Include ancestor's tree"), True)
self.__ancestortree.set_help(_('Whether to include an ancestor '
'graph on each individual page'))
addopt("ancestortree", self.__ancestortree)
self.__ancestortree.connect('value-changed', self.__graph_changed)
self.__graphgens = NumberOption(_("Graph generations"), 4, 2, 5)
self.__graphgens.set_help(_("The number of generations to include in "
"the ancestor graph"))
addopt("graphgens", self.__graphgens)
self.__graph_changed()
nogid = BooleanOption(_('Suppress Gramps ID'), False)
nogid.set_help(_('Whether to include the Gramps ID of objects'))
addopt( "nogid", nogid )
def __add_page_generation_options(self, menu):
"""
Options on the "Page Generation" tab.
"""
category_name = _("Page Generation")
addopt = partial(menu.add_option, category_name)
homenote = NoteOption(_('Home page note'))
homenote.set_help(_("A note to be used on the home page"))
addopt("homenote", homenote)
homeimg = MediaOption(_('Home page image'))
homeimg.set_help(_("An image to be used on the home page"))
addopt("homeimg", homeimg)
intronote = NoteOption(_('Introduction note'))
intronote.set_help(_("A note to be used as the introduction"))
addopt("intronote", intronote)
introimg = MediaOption(_('Introduction image'))
introimg.set_help(_("An image to be used as the introduction"))
addopt("introimg", introimg)
contactnote = NoteOption(_("Publisher contact note"))
contactnote.set_help(_("A note to be used as the publisher contact."
"\nIf no publisher information is given,"
"\nno contact page will be created")
)
addopt("contactnote", contactnote)
contactimg = MediaOption(_("Publisher contact image"))
contactimg.set_help(_("An image to be used as the publisher contact."
"\nIf no publisher information is given,"
"\nno contact page will be created")
)
addopt("contactimg", contactimg)
headernote = NoteOption(_('HTML user header'))
headernote.set_help(_("A note to be used as the page header"))
addopt("headernote", headernote)
footernote = NoteOption(_('HTML user footer'))
footernote.set_help(_("A note to be used as the page footer"))
addopt("footernote", footernote)
def __add_images_generation_options(self, menu):
"""
Options on the "Page Generation" tab.
"""
category_name = _("Images Generation")
addopt = partial(menu.add_option, category_name)
self.__gallery = BooleanOption(_("Include images and media objects"),
True)
self.__gallery.set_help(_('Whether to include '
'a gallery of media objects'))
addopt("gallery", self.__gallery)
self.__gallery.connect('value-changed', self.__gallery_changed)
self.__unused = BooleanOption(
_("Include unused images and media objects"), True)
self.__unused.set_help(_('Whether to include unused or unreferenced'
' media objects'))
addopt("unused", self.__unused)
self.__create_thumbs_only = BooleanOption(
_("Create and only use thumbnail- sized images"), False)
self.__create_thumbs_only.set_help(
_("This option allows you to create only thumbnail images "
"instead of the full-sized images on the Media Page. "
"This will allow you to have a much "
"smaller total upload size to your web hosting site."))
addopt("create_thumbs_only", self.__create_thumbs_only)
self.__create_thumbs_only.connect("value-changed",
self.__gallery_changed)
self.__maxinitialimagewidth = NumberOption(
_("Max width of initial image"), _DEFAULT_MAX_IMG_WIDTH, 0, 2000)
self.__maxinitialimagewidth.set_help(
_("This allows you to set the maximum width "
"of the image shown on the media page. Set to 0 for no limit."))
addopt("maxinitialimagewidth", self.__maxinitialimagewidth)
self.__maxinitialimageheight = NumberOption(
_("Max height of initial image"), _DEFAULT_MAX_IMG_HEIGHT, 0, 2000)
self.__maxinitialimageheight.set_help(
_("This allows you to set the maximum height "
"of the image shown on the media page. Set to 0 for no limit."))
addopt("maxinitialimageheight", self.__maxinitialimageheight)
self.__gallery_changed()
def __add_privacy_options(self, menu):
"""
Options on the "Privacy" tab.
"""
category_name = _("Privacy")
stdoptions.add_living_people_option(menu, category_name)
stdoptions.add_private_data_option(menu, category_name, default=False)
addopt = partial(menu.add_option, category_name)
def __add_download_options(self, menu):
"""
Options for the download tab ...
"""
category_name = _("Download")
addopt = partial(menu.add_option, category_name)
self.__incdownload = BooleanOption(_("Include download page"), False)
self.__incdownload.set_help(
_('Whether to include a database download option'))
addopt("incdownload", self.__incdownload)
self.__incdownload.connect('value-changed', self.__download_changed)
self.__down_fname1 = DestinationOption(
_("Download Filename"),
os.path.join(config.get('paths.website-directory'), ""))
self.__down_fname1.set_help(
_("File to be used for downloading of database"))
addopt("down_fname1", self.__down_fname1)
self.__dl_descr1 = StringOption(_("Description for download"),
_('Smith Family Tree'))
self.__dl_descr1.set_help(_('Give a description for this file.'))
addopt("dl_descr1", self.__dl_descr1)
self.__down_fname2 = DestinationOption(
_("Download Filename"),
os.path.join(config.get('paths.website-directory'), ""))
self.__down_fname2.set_help(
_("File to be used for downloading of database"))
addopt("down_fname2", self.__down_fname2)
self.__dl_descr2 = StringOption(_("Description for download"),
_('Johnson Family Tree'))
self.__dl_descr2.set_help(_('Give a description for this file.'))
addopt("dl_descr2", self.__dl_descr2)
self.__download_changed()
def __add_advanced_options(self, menu):
"""
Options on the "Advanced" tab.
"""
category_name = _("Advanced Options")
addopt = partial(menu.add_option, category_name)
encoding = EnumeratedListOption(_('Character set encoding'),
_CHARACTER_SETS[0][1])
for eopt in _CHARACTER_SETS:
encoding.add_item(eopt[1], eopt[0])
encoding.set_help(_("The encoding to be used for the web files"))
addopt("encoding", encoding)
linkhome = BooleanOption(
_('Include link to active person on every page'), False)
linkhome.set_help(
_('Include a link to the active person (if they have a webpage)'))
addopt("linkhome", linkhome)
showbirth = BooleanOption(
_("Include a column for birth dates on the index pages"), True)
showbirth.set_help(_('Whether to include a birth column'))
addopt("showbirth", showbirth)
showdeath = BooleanOption(
_("Include a column for death dates on the index pages"), False)
showdeath.set_help(_('Whether to include a death column'))
addopt("showdeath", showdeath)
showpartner = BooleanOption(_("Include a column for partners on the "
"index pages"), False)
showpartner.set_help(_('Whether to include a partners column'))
        addopt("showpartner", showpartner)
showparents = BooleanOption(_("Include a column for parents on the "
"index pages"), False)
showparents.set_help(_('Whether to include a parents column'))
addopt("showparents", showparents)
        showallsiblings = BooleanOption(
            _("Include half- and/or step-siblings on the individual pages"),
            False)
        showallsiblings.set_help(
            _("Whether to include half- and/or "
              "step-siblings with the parents and siblings"))
        addopt('showhalfsiblings', showallsiblings)
def __add_advanced_options_2(self, menu):
"""
Continue options on the "Advanced" tab.
"""
category_name = _("Advanced Options (2)")
addopt = partial(menu.add_option, category_name)
birthorder = BooleanOption(
_('Sort all children in birth order'), False)
        birthorder.set_help(
            _('Whether to display children in birth order or in entry order'))
addopt("birthorder", birthorder)
inc_families = BooleanOption(_("Include family pages"), False)
inc_families.set_help(_("Whether or not to include family pages."))
addopt("inc_families", inc_families)
inc_events = BooleanOption(_('Include event pages'), False)
inc_events.set_help(
_('Add a complete events list and relevant pages or not'))
addopt("inc_events", inc_events)
inc_repository = BooleanOption(_('Include repository pages'), False)
inc_repository.set_help(
_('Whether or not to include the Repository Pages.'))
addopt("inc_repository", inc_repository)
inc_gendex = BooleanOption(
_('Include GENDEX file (/gendex.txt)'), False)
inc_gendex.set_help(_('Whether to include a GENDEX file or not'))
addopt("inc_gendex", inc_gendex)
inc_addressbook = BooleanOption(_("Include address book pages"), False)
        inc_addressbook.set_help(_("Whether or not to add Address Book pages, "
                                   "which can include e-mail and website "
                                   "addresses and personal address/residence "
                                   "events."))
addopt("inc_addressbook", inc_addressbook)
def __add_place_map_options(self, menu):
"""
options for the Place Map tab.
"""
category_name = _("Place Map Options")
addopt = partial(menu.add_option, category_name)
mapopts = [
[_("OpenStreetMap"), "OpenStreetMap"],
[_("Google"), "Google"]]
self.__mapservice = EnumeratedListOption(_("Map Service"),
mapopts[0][1])
for trans, opt in mapopts:
self.__mapservice.add_item(opt, trans)
self.__mapservice.set_help(_("Choose your choice of map service for "
"creating the Place Map Pages."))
self.__mapservice.connect("value-changed", self.__placemap_options)
addopt("mapservice", self.__mapservice)
self.__placemappages = BooleanOption(
_("Include Place map on Place Pages"), False)
        self.__placemappages.set_help(
            _("Whether to include a place map on the Place Pages, "
              "where Latitude/Longitude are available."))
self.__placemappages.connect("value-changed", self.__placemap_options)
addopt("placemappages", self.__placemappages)
self.__familymappages = BooleanOption(_("Include Family Map Pages with "
"all places shown on the map"),
False)
self.__familymappages.set_help(
_("Whether or not to add an individual page map "
"showing all the places on this page. "
"This will allow you to see how your family "
"traveled around the country."))
self.__familymappages.connect("value-changed", self.__placemap_options)
addopt("familymappages", self.__familymappages)
googleopts = [
(_("Family Links"), "FamilyLinks"),
(_("Drop"), "Drop"),
(_("Markers"), "Markers")]
        self.__googleopts = EnumeratedListOption(_("Google/FamilyMap Option"),
                                                 googleopts[0][1])
for trans, opt in googleopts:
self.__googleopts.add_item(opt, trans)
        self.__googleopts.set_help(
            _("Select which option you would like "
              "for the Google Maps Family Map pages."))
addopt("googleopts", self.__googleopts)
        self.__googlemapkey = StringOption(_("Google maps API key"), "")
self.__googlemapkey.set_help(_("The API key used for the Google maps"))
addopt("googlemapkey", self.__googlemapkey)
self.__placemap_options()
def __add_others_options(self, menu):
"""
Options for the cms tab, web calendar inclusion, php ...
"""
category_name = _("Other inclusion (CMS, Web Calendar, Php)")
addopt = partial(menu.add_option, category_name)
        self.__usecms = BooleanOption(
            _("Do we include these pages in a CMS web site?"), False)
addopt("usecms", self.__usecms)
default_dir = "/NAVWEB"
self.__cms_uri = DestinationOption(_("URI"),
os.path.join(
config.get(
'paths.website-cms-uri'),
default_dir))
        self.__cms_uri.set_help(
            _("Where do you place your web site? Default = /NAVWEB"))
self.__cms_uri.connect('value-changed', self.__cms_uri_changed)
addopt("cmsuri", self.__cms_uri)
self.__cms_uri_changed()
        self.__usecal = BooleanOption(
            _("Do we include the web calendar?"), False)
addopt("usecal", self.__usecal)
default_calendar = "/WEBCAL"
self.__calendar_uri = DestinationOption(_("URI"),
os.path.join(
config.get('paths.website'
'-cal-uri'),
default_calendar))
        self.__calendar_uri.set_help(
            _("Where do you place your web site? Default = /WEBCAL"))
self.__calendar_uri.connect('value-changed',
self.__calendar_uri_changed)
addopt("caluri", self.__calendar_uri)
self.__calendar_uri_changed()
def __cms_uri_changed(self):
"""
Update the change of storage: archive or directory
"""
self.__target_uri = self.__cms_uri.get_value()
def __calendar_uri_changed(self):
"""
Update the change of storage: Where is the web calendar ?
Possible cases :
1 - /WEBCAL (relative URI to the navweb site)
2 - http://mysite.org/WEBCAL (URL is on another website)
        3 - //mysite.org/WEBCAL (URL depends on the protocol used)
"""
self.__target_cal_uri = self.__calendar_uri.get_value()
def __archive_changed(self):
"""
Update the change of storage: archive or directory
"""
        if self.__archive.get_value():
self.__target.set_extension(".tar.gz")
self.__target.set_directory_entry(False)
else:
self.__target.set_directory_entry(True)
def __update_filters(self):
"""
Update the filter list based on the selected person
"""
gid = self.__pid.get_value()
person = self.__db.get_person_from_gramps_id(gid)
filter_list = utils.get_person_filters(person, include_single=False)
self.__filter.set_filters(filter_list)
def __filter_changed(self):
"""
Handle filter change. If the filter is not specific to a person,
disable the person option
"""
filter_value = self.__filter.get_value()
if filter_value == 0: # "Entire Database" (as "include_single=False")
self.__pid.set_available(False)
else:
# The other filters need a center person (assume custom ones too)
self.__pid.set_available(True)
def __stylesheet_changed(self):
"""
Handles the changing nature of the stylesheet
"""
css_opts = self.__css.get_value()
if CSS[css_opts]["navigation"]:
self.__navigation.set_available(True)
else:
self.__navigation.set_available(False)
self.__navigation.set_value("Horizontal")
def __graph_changed(self):
"""
Handle enabling or disabling the ancestor graph
"""
self.__graphgens.set_available(self.__ancestortree.get_value())
def __gallery_changed(self):
"""
Handles the changing nature of gallery
"""
_gallery_option = self.__gallery.get_value()
_create_thumbs_only_option = self.__create_thumbs_only.get_value()
        # images and media objects to be used, make all options available...
if _gallery_option:
self.__create_thumbs_only.set_available(True)
self.__maxinitialimagewidth.set_available(True)
self.__maxinitialimageheight.set_available(True)
# thumbnail-sized images only...
if _create_thumbs_only_option:
self.__maxinitialimagewidth.set_available(False)
self.__maxinitialimageheight.set_available(False)
            # full-sized images and Media Pages will be created...
else:
self.__maxinitialimagewidth.set_available(True)
self.__maxinitialimageheight.set_available(True)
# no images or media objects are to be used...
else:
self.__create_thumbs_only.set_available(False)
self.__maxinitialimagewidth.set_available(False)
self.__maxinitialimageheight.set_available(False)
def __download_changed(self):
"""
Handles the changing nature of include download page
"""
if self.__incdownload.get_value():
self.__down_fname1.set_available(True)
self.__dl_descr1.set_available(True)
self.__down_fname2.set_available(True)
self.__dl_descr2.set_available(True)
else:
self.__down_fname1.set_available(False)
self.__dl_descr1.set_available(False)
self.__down_fname2.set_available(False)
self.__dl_descr2.set_available(False)
def __placemap_options(self):
"""
Handles the changing nature of the place map Options
"""
# get values for all Place Map Options tab...
place_active = self.__placemappages.get_value()
family_active = self.__familymappages.get_value()
mapservice_opts = self.__mapservice.get_value()
#google_opts = self.__googleopts.get_value()
if place_active or family_active:
self.__mapservice.set_available(True)
else:
self.__mapservice.set_available(False)
if family_active and mapservice_opts == "Google":
self.__googleopts.set_available(True)
else:
self.__googleopts.set_available(False)
if (place_active or family_active) and mapservice_opts == "Google":
self.__googlemapkey.set_available(True)
else:
self.__googlemapkey.set_available(False)
# FIXME. Why do we need our own sorting? Why not use Sort?
def sort_people(dbase, handle_list, rlocale=glocale):
"""
    Sort the database people by surname.
"""
sname_sub = defaultdict(list)
sortnames = {}
for person_handle in handle_list:
person = dbase.get_person_from_handle(person_handle)
primary_name = person.get_primary_name()
if primary_name.group_as:
surname = primary_name.group_as
else:
surname = str(
dbase.get_name_group_mapping(
_nd.primary_surname(primary_name)))
# Treat people who have no name with those whose name is just
# 'whitespace'
if surname is None or surname.isspace():
surname = ''
sortnames[person_handle] = _nd.sort_string(primary_name)
sname_sub[surname].append(person_handle)
sorted_lists = []
temp_list = sorted(sname_sub, key=rlocale.sort_key)
for name in temp_list:
slist = sorted(((sortnames[x], x) for x in sname_sub[name]),
key=lambda x: rlocale.sort_key(x[0]))
entries = [x[1] for x in slist]
sorted_lists.append((name, entries))
return sorted_lists
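# A minimal sketch of the group-then-sort pattern used by sort_people()
# above, with plain (surname, given) tuples instead of database handles;
# the use of str.lower as a sort key is an assumption for the example:
def _example_group_and_sort(names):
    """Group given names by surname and sort both levels."""
    from collections import defaultdict
    groups = defaultdict(list)
    for surname, given in names:
        groups[surname].append(given)
    return [(surname, sorted(groups[surname], key=str.lower))
            for surname in sorted(groups, key=str.lower)]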
def sort_event_types(dbase, event_types, event_handle_list, rlocale):
"""
sort a list of event types and their associated event handles
@param: dbase -- report database
@param: event_types -- a dict of event types
@param: event_handle_list -- all event handles in this database
"""
event_dict = dict((evt_type, list()) for evt_type in event_types)
for event_handle in event_handle_list:
event = dbase.get_event_from_handle(event_handle)
event_type = rlocale.translation.sgettext(event.get_type().xml_str())
# add (gramps_id, date, handle) from this event
if event_type in event_dict:
sort_value = event.get_date_object().get_sort_value()
event_dict[event_type].append((sort_value, event_handle))
for tup_list in event_dict.values():
tup_list.sort()
# return a list of sorted tuples, one per event
    retval = list(event_dict.items())
retval.sort(key=lambda item: str(item[0]))
return retval
# Modified _get_regular_surname from WebCal.py to get prefix, first name,
# and suffix
def _get_short_name(gender, name):
    """ Return the first name with the suffix appended if present
    (the gender parameter is currently unused) """
short_name = name.get_first_name()
suffix = name.get_suffix()
if suffix:
short_name = short_name + ", " + suffix
return short_name
def __get_person_keyname(dbase, handle):
""" .... """
person = dbase.get_person_from_handle(handle)
return _nd.sort_string(person.get_primary_name())
def __get_place_keyname(dbase, handle):
""" ... """
return utils.place_name(dbase, handle)
# See : http://www.gramps-project.org/bugs/view.php?id=4423
# Contraction data taken from CLDR 22.1. Only the default variant is considered.
# The languages included below are, by no means, all the languages that have
# contractions - just a sample of languages that have been supported.
# At the time of writing (Feb 2013), the following languages have greater than
# 50% coverage of translation of Gramps: bg Bulgarian, ca Catalan, cs Czech, da
# Danish, de German, el Greek, en_GB, es Spanish, fi Finnish, fr French, he
# Hebrew, hr Croatian, hu Hungarian, it Italian, ja Japanese, lt Lithuanian, nb
# Norwegian Bokmål, nn Norwegian Nynorsk, nl Dutch, pl Polish, pt_BR Portuguese
# (Brazil), pt_PT Portuguese (Portugal), ru Russian, sk Slovak, sl Slovenian, sv
# Swedish, vi Vietnamese, zh_CN Chinese.
# Key is the language (or language and country), Value is a list of
# contractions. Each contraction consists of a tuple. The first element of the
# tuple is the list of characters, the second element is the string to use as
# the index entry.
# The DUCET contractions (e.g. LATIN CAPITAL LETTER L, MIDDLE DOT) are ignored,
# as are the suppressed contractions in some locales.
CONTRACTIONS_DICT = {
# bg Bulgarian validSubLocales="bg_BG" no contractions
# ca Catalan validSubLocales="ca_AD ca_ES"
"ca" : [(("l·", "L·"), "L")],
# Czech, validSubLocales="cs_CZ" Czech_Czech Republic
"cs" : [(("ch", "cH", "Ch", "CH"), "CH")],
# Danish validSubLocales="da_DK" Danish_Denmark
"da" : [(("aa", "Aa", "AA"), "Å")],
# de German validSubLocales="de_AT de_BE de_CH de_DE de_LI de_LU" no
# contractions in standard collation.
# el Greek validSubLocales="el_CY el_GR" no contractions.
# es Spanish validSubLocales="es_419 es_AR es_BO es_CL es_CO es_CR es_CU
# es_DO es_EA es_EC es_ES es_GQ es_GT es_HN es_IC es_MX es_NI es_PA es_PE
# es_PH es_PR es_PY es_SV es_US es_UY es_VE" no contractions in standard
# collation.
    # fi Finnish validSubLocales="fi_FI" no contractions in default (phonebook)
# collation.
# fr French no collation data.
# he Hebrew validSubLocales="he_IL" no contractions
    # hr Croatian validSubLocales="hr_BA hr_HR"
"hr" : [(("dž", "Dž"), "dž"),
(("lj", "Lj", 'LJ'), "LJ"),
(("Nj", "NJ", "nj"), "NJ")],
# Hungarian hu_HU for two and three character contractions.
"hu" : [(("cs", "Cs", "CS"), "CS"),
(("dzs", "Dzs", "DZS"), "DZS"), # order is important
(("dz", "Dz", "DZ"), "DZ"),
(("gy", "Gy", "GY"), "GY"),
(("ly", "Ly", "LY"), "LY"),
(("ny", "Ny", "NY"), "NY"),
(("sz", "Sz", "SZ"), "SZ"),
(("ty", "Ty", "TY"), "TY"),
(("zs", "Zs", "ZS"), "ZS")
],
# it Italian no collation data.
# ja Japanese unable to process the data as it is too complex.
# lt Lithuanian no contractions.
# Norwegian Bokmål
"nb" : [(("aa", "Aa", "AA"), "Å")],
# nn Norwegian Nynorsk validSubLocales="nn_NO"
"nn" : [(("aa", "Aa", "AA"), "Å")],
# nl Dutch no collation data.
# pl Polish validSubLocales="pl_PL" no contractions
# pt Portuguese no collation data.
# ru Russian validSubLocales="ru_BY ru_KG ru_KZ ru_MD ru_RU ru_UA" no
# contractions
# Slovak, validSubLocales="sk_SK" Slovak_Slovakia
# having DZ in Slovak as a contraction was rejected in
# http://unicode.org/cldr/trac/ticket/2968
"sk" : [(("ch", "cH", "Ch", "CH"), "Ch")],
# sl Slovenian validSubLocales="sl_SI" no contractions
# sv Swedish validSubLocales="sv_AX sv_FI sv_SE" default collation is
# "reformed" no contractions.
# vi Vietnamese validSubLocales="vi_VN" no contractions.
# zh Chinese validSubLocales="zh_Hans zh_Hans_CN zh_Hans_SG" no contractions
# in Latin characters the others are too complex.
}
# The comment below from the glibc locale sv_SE in
# localedata/locales/sv_SE :
#
# % The letter w is normally not present in the Swedish alphabet. It
# % exists in some names in Swedish and foreign words, but is accounted
# % for as a variant of 'v'. Words and names with 'w' are in Swedish
# % ordered alphabetically among the words and names with 'v'. If two
# % words or names are only to be distinguished by 'v' or 'w', 'v' is
# % placed before 'w'.
#
# See : http://www.gramps-project.org/bugs/view.php?id=2933
#
# HOWEVER: the characters V and W in Swedish are not considered as a special
# case for several reasons. (1) The default collation for Swedish (called the
# 'reformed' collation type) regards the difference between 'v' and 'w' as a
# primary difference. (2) 'v' and 'w' in the 'standard' (non-default) collation
# type are not a contraction, just a case where the difference is secondary
# rather than primary. (3) There are plenty of other languages where a
# difference that is primary in other languages is secondary, and those are not
# specially handled.
def first_letter(string, rlocale=glocale):
"""
Receives a string and returns the first letter
"""
if string is None or len(string) < 1:
return ' '
norm_unicode = normalize('NFKC', str(string))
contractions = CONTRACTIONS_DICT.get(COLLATE_LANG)
    if contractions is None:
contractions = CONTRACTIONS_DICT.get(COLLATE_LANG.split("_")[0])
if contractions is not None:
for contraction in contractions:
count = len(contraction[0][0])
if (len(norm_unicode) >= count and
norm_unicode[:count] in contraction[0]):
return contraction[1]
# no special case
return norm_unicode[0].upper()
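# Illustrative sketch of the contraction lookup performed by first_letter()
# above, decoupled from COLLATE_LANG; the sample call in the trailing
# comment is an assumption for the example:
def _example_first_letter(string, contractions):
    """Return the index letter for string, given one contraction list."""
    for chars, index_entry in contractions:
        count = len(chars[0])
        if len(string) >= count and string[:count] in chars:
            return index_entry
    return string[0].upper()
# e.g. _example_first_letter("Csilla", CONTRACTIONS_DICT["hu"]) == "CS"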
try:
import PyICU
PRIM_COLL = PyICU.Collator.createInstance(PyICU.Locale(COLLATE_LANG))
PRIM_COLL.setStrength(PRIM_COLL.PRIMARY)
def primary_difference(prev_key, new_key, rlocale=glocale):
"""
Try to use the PyICU collation.
"""
return PRIM_COLL.compare(prev_key, new_key) != 0
except Exception:
    def primary_difference(prev_key, new_key, rlocale=glocale):
        """
        The PyICU collation is not available.

        Return True if there is a primary difference between the two
        parameters.  See http://www.gramps-project.org/bugs/view.php?id=2933#c9317:
        if letter[i]+'a' < letter[i+1]+'b' and letter[i+1]+'a' < letter[i]+'b'
        is true, then the letters should be grouped together.

        The test characters here must not be any that are used in
        contractions.
        """
        return rlocale.sort_key(prev_key + "e") >= \
            rlocale.sort_key(new_key + "f") or \
            rlocale.sort_key(new_key + "e") >= \
            rlocale.sort_key(prev_key + "f")
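# A self-contained sketch of the fallback comparison above, using
# str.casefold as the sort key (an assumption for the example; the real
# code uses rlocale.sort_key):
def _example_primary_difference(prev_key, new_key, sort_key=str.casefold):
    """Return True if prev_key and new_key differ at the primary level."""
    return (sort_key(prev_key + "e") >= sort_key(new_key + "f") or
            sort_key(new_key + "e") >= sort_key(prev_key + "f"))
# _example_primary_difference("A", "B") is True  (kept as separate letters)
# _example_primary_difference("A", "a") is False (grouped under one letter)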
def get_first_letters(dbase, handle_list, key, rlocale=glocale):
"""
get the first letters of the handle_list
@param: handle_list -- One of a handle list for either person or
place handles or an evt types list
@param: key -- Either a person, place, or event type
The first letter (or letters if there is a contraction) are extracted from
all the objects in the handle list. There may be duplicates, and there may
be letters where there is only a secondary or tertiary difference, not a
primary difference. The list is sorted in collation order. For each group
with secondary or tertiary differences, the first in collation sequence is
retained. For example, assume the default collation sequence (DUCET) and
names Ånström and Apple. These will sort in the order shown. Å and A have a
secondary difference. If the first letter from these names was chosen then
    the index entry would be Å. This is not desirable. Instead, the initial
letters are extracted (Å and A). These are sorted, which gives A and Å. Then
the first of these is used for the index entry.
"""
index_list = []
for handle in handle_list:
if key == _KEYPERSON:
keyname = __get_person_keyname(dbase, handle)
elif key == _KEYPLACE:
keyname = __get_place_keyname(dbase, handle)
else:
if rlocale != glocale:
keyname = rlocale.translation.sgettext(handle)
else:
keyname = handle
ltr = first_letter(keyname)
index_list.append(ltr)
# Now remove letters where there is not a primary difference
index_list.sort(key=rlocale.sort_key)
first = True
prev_index = None
for key in index_list[:]: #iterate over a slice copy of the list
if first or primary_difference(prev_index, key, rlocale):
first = False
prev_index = key
else:
index_list.remove(key)
# return menu set letters for alphabet_navigation
return index_list
def get_index_letter(letter, index_list, rlocale=glocale):
"""
This finds the letter in the index_list that has no primary difference from
the letter provided. See the discussion in get_first_letters above.
Continuing the example, if letter is Å and index_list is A, then this would
return A.
"""
for index in index_list:
if not primary_difference(letter, index, rlocale):
return index
LOG.warning("Initial letter '%s' not found in alphabetic navigation list",
letter)
LOG.debug("filtered sorted index list %s", index_list)
return letter
def alphabet_navigation(index_list, rlocale=glocale):
"""
Will create the alphabet navigation bar for classes IndividualListPage,
SurnameListPage, PlaceListPage, and EventList
@param: index_list -- a dictionary of either letters or words
"""
sorted_set = defaultdict(int)
for menu_item in index_list:
sorted_set[menu_item] += 1
    # keep only the unique letters, sorted; the occurrence counts are not used
sorted_alpha_index = sorted(sorted_set, key=rlocale.sort_key)
# if no letters, return None to its callers
if not sorted_alpha_index:
return None
num_ltrs = len(sorted_alpha_index)
num_of_cols = 26
num_of_rows = ((num_ltrs // num_of_cols) + 1)
# begin alphabet navigation division
with Html("div", id="alphanav") as alphabetnavigation:
index = 0
for row in range(num_of_rows):
unordered = Html("ul")
cols = 0
while cols <= num_of_cols and index < num_ltrs:
menu_item = sorted_alpha_index[index]
if menu_item == ' ':
                    menu_item = '&nbsp;'
# adding title to hyperlink menu for screen readers and
# braille writers
title_str = rlocale.translation.sgettext("Alphabet Menu: %s") % menu_item
hyper = Html("a", menu_item, title=title_str,
href="#%s" % menu_item)
unordered.extend(Html("li", hyper, inline=True))
index += 1
cols += 1
num_of_rows -= 1
alphabetnavigation += unordered
return alphabetnavigation
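# The row/column walk in alphabet_navigation() above is a chunking pattern;
# a compact standalone sketch (the 26-per-row default mirrors num_of_cols
# and is otherwise an assumption for the example):
def _example_chunk_rows(letters, per_row=26):
    """Split a list of letters into rows of at most per_row items."""
    return [letters[i:i + per_row]
            for i in range(0, len(letters), per_row)]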
def _has_webpage_extension(url):
"""
determine if a filename has an extension or not...
@param: url -- filename to be checked
"""
return any(url.endswith(ext) for ext in _WEB_EXT)
def add_birthdate(dbase, ppl_handle_list):
"""
    Attach a sortable birth date to each person handle, so that the
    list can be sorted in birth order
@param: dbase -- The database to use
@param: ppl_handle_list -- the handle for the people
"""
sortable_individuals = []
for person_handle in ppl_handle_list:
birth_date = 0 # dummy value in case none is found
person = dbase.get_person_from_handle(person_handle)
if person:
birth_ref = person.get_birth_ref()
if birth_ref:
birth = dbase.get_event_from_handle(birth_ref.ref)
if birth:
birth_date = birth.get_date_object().get_sort_value()
sortable_individuals.append((birth_date, person_handle))
# return a list of handles with the individual's birthdate attached
return sortable_individuals
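# add_birthdate() builds (sort_value, handle) pairs; sorting them and
# stripping the dates afterwards is the classic decorate-sort-undecorate
# pattern.  A sketch, with the pair layout taken from the function above:
def _example_sort_by_birthdate(pairs):
    """Sort (birth_sort_value, handle) pairs and return the handles."""
    return [handle for _sort_value, handle in sorted(pairs)]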
def _find_birth_date(dbase, individual):
"""
will look for a birth date within the person's events
@param: dbase -- The database to use
    @param: individual -- The individual for whom we want to find the birth date
"""
date_out = None
birth_ref = individual.get_birth_ref()
if birth_ref:
birth = dbase.get_event_from_handle(birth_ref.ref)
if birth:
date_out = birth.get_date_object()
date_out.fallback = False
else:
person_evt_ref_list = individual.get_primary_event_ref_list()
if person_evt_ref_list:
for evt_ref in person_evt_ref_list:
event = dbase.get_event_from_handle(evt_ref.ref)
if event:
if event.get_type().is_birth_fallback():
date_out = event.get_date_object()
date_out.fallback = True
LOG.debug("setting fallback to true for '%s'", event)
break
return date_out
def _find_death_date(dbase, individual):
"""
will look for a death date within a person's events
@param: dbase -- The database to use
    @param: individual -- The individual for whom we want to find the death date
"""
date_out = None
death_ref = individual.get_death_ref()
if death_ref:
death = dbase.get_event_from_handle(death_ref.ref)
if death:
date_out = death.get_date_object()
date_out.fallback = False
else:
person_evt_ref_list = individual.get_primary_event_ref_list()
if person_evt_ref_list:
for evt_ref in person_evt_ref_list:
event = dbase.get_event_from_handle(evt_ref.ref)
if event:
if event.get_type().is_death_fallback():
date_out = event.get_date_object()
date_out.fallback = True
LOG.debug("setting fallback to true for '%s'", event)
break
return date_out
def build_event_data_by_individuals(dbase, ppl_handle_list):
"""
creates a list of event handles and event types for this database
@param: dbase -- The database to use
@param: ppl_handle_list -- the handle for the people
"""
event_handle_list = []
event_types = []
for person_handle in ppl_handle_list:
person = dbase.get_person_from_handle(person_handle)
if person:
evt_ref_list = person.get_event_ref_list()
if evt_ref_list:
for evt_ref in evt_ref_list:
event = dbase.get_event_from_handle(evt_ref.ref)
if event:
event_types.append(str(event.get_type()))
event_handle_list.append(evt_ref.ref)
person_family_handle_list = person.get_family_handle_list()
if person_family_handle_list:
for family_handle in person_family_handle_list:
family = dbase.get_family_from_handle(family_handle)
if family:
family_evt_ref_list = family.get_event_ref_list()
if family_evt_ref_list:
for evt_ref in family_evt_ref_list:
event = dbase.get_event_from_handle(evt_ref.ref)
if event:
                                    event_types.append(str(event.get_type()))
event_handle_list.append(evt_ref.ref)
# return event_handle_list and event types to its caller
return event_handle_list, event_types
|
beernarrd/gramps
|
gramps/plugins/webreport/narrativeweb.py
|
Python
|
gpl-2.0
| 441,735
|
[
"Brian"
] |
588611dd0854ddb7528295d0b9303f0ea27ad5e3793712f5fdea6ef917ab36b0
|
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
#
# DistanceMapRegistration
#
class DistanceMapRegistration(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "DistanceMapRegistration" # TODO make this more human readable by adding spaces
self.parent.categories = ["Examples"]
self.parent.dependencies = []
self.parent.contributors = ["John Doe (AnyWare Corp.)"] # replace with "Firstname Lastname (Organization)"
self.parent.helpText = """
    This is an example of a scripted loadable module bundled in an extension.
It performs a simple thresholding on the input volume and optionally captures a screenshot.
"""
self.parent.acknowledgementText = """
This file was originally developed by Jean-Christophe Fillion-Robin, Kitware Inc.
and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.
""" # replace with organization, grant and thanks.
#
# DistanceMapRegistrationWidget
#
class DistanceMapRegistrationWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
# Instantiate and connect widgets ...
#
# Parameters Area
#
parametersCollapsibleButton = ctk.ctkCollapsibleButton()
parametersCollapsibleButton.text = "Parameters"
self.layout.addWidget(parametersCollapsibleButton)
# Layout within the dummy collapsible button
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
#
# input volume selector
#
self.inputSelector = slicer.qMRMLNodeComboBox()
self.inputSelector.nodeTypes = ["vtkMRMLScalarVolumeNode"]
self.inputSelector.selectNodeUponCreation = True
self.inputSelector.addEnabled = False
self.inputSelector.removeEnabled = False
self.inputSelector.noneEnabled = False
self.inputSelector.showHidden = False
self.inputSelector.showChildNodeTypes = False
self.inputSelector.setMRMLScene( slicer.mrmlScene )
self.inputSelector.setToolTip( "Pick the input to the algorithm." )
parametersFormLayout.addRow("Input Volume: ", self.inputSelector)
#
# output volume selector
#
self.outputSelector = slicer.qMRMLNodeComboBox()
self.outputSelector.nodeTypes = ["vtkMRMLScalarVolumeNode"]
self.outputSelector.selectNodeUponCreation = True
self.outputSelector.addEnabled = True
self.outputSelector.removeEnabled = True
self.outputSelector.noneEnabled = True
self.outputSelector.showHidden = False
self.outputSelector.showChildNodeTypes = False
self.outputSelector.setMRMLScene( slicer.mrmlScene )
self.outputSelector.setToolTip( "Pick the output to the algorithm." )
parametersFormLayout.addRow("Output Volume: ", self.outputSelector)
#
# threshold value
#
self.imageThresholdSliderWidget = ctk.ctkSliderWidget()
self.imageThresholdSliderWidget.singleStep = 0.1
self.imageThresholdSliderWidget.minimum = -100
self.imageThresholdSliderWidget.maximum = 100
self.imageThresholdSliderWidget.value = 0.5
    self.imageThresholdSliderWidget.setToolTip("Set threshold value for computing the output image. Voxels that have intensities lower than this value will be set to zero.")
parametersFormLayout.addRow("Image threshold", self.imageThresholdSliderWidget)
#
# check box to trigger taking screen shots for later use in tutorials
#
self.enableScreenshotsFlagCheckBox = qt.QCheckBox()
self.enableScreenshotsFlagCheckBox.checked = 0
self.enableScreenshotsFlagCheckBox.setToolTip("If checked, take screen shots for tutorials. Use Save Data to write them to disk.")
parametersFormLayout.addRow("Enable Screenshots", self.enableScreenshotsFlagCheckBox)
#
# Apply Button
#
self.applyButton = qt.QPushButton("Apply")
self.applyButton.toolTip = "Run the algorithm."
self.applyButton.enabled = False
parametersFormLayout.addRow(self.applyButton)
# connections
self.applyButton.connect('clicked(bool)', self.onApplyButton)
self.inputSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
self.outputSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
# Add vertical spacer
self.layout.addStretch(1)
# Refresh Apply button state
self.onSelect()
def cleanup(self):
pass
def onSelect(self):
self.applyButton.enabled = self.inputSelector.currentNode() and self.outputSelector.currentNode()
def onApplyButton(self):
logic = DistanceMapRegistrationLogic()
enableScreenshotsFlag = self.enableScreenshotsFlagCheckBox.checked
imageThreshold = self.imageThresholdSliderWidget.value
logic.run(self.inputSelector.currentNode(), self.outputSelector.currentNode(), imageThreshold, enableScreenshotsFlag)
#
# DistanceMapRegistrationLogic
#
class DistanceMapRegistrationLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def hasImageData(self,volumeNode):
"""This is an example logic method that
returns true if the passed in volume
node has valid image data
"""
if not volumeNode:
logging.debug('hasImageData failed: no volume node')
return False
    if volumeNode.GetImageData() is None:
logging.debug('hasImageData failed: no image data in volume node')
return False
return True
def isValidInputOutputData(self, inputVolumeNode, outputVolumeNode):
"""Validates if the output is not the same as input
"""
if not inputVolumeNode:
logging.debug('isValidInputOutputData failed: no input volume node defined')
return False
if not outputVolumeNode:
logging.debug('isValidInputOutputData failed: no output volume node defined')
return False
if inputVolumeNode.GetID()==outputVolumeNode.GetID():
      logging.debug('isValidInputOutputData failed: input and output volumes are the same. Create a new volume for output to avoid this error.')
return False
return True
def takeScreenshot(self,name,description,type=-1):
# show the message even if not taking a screen shot
slicer.util.delayDisplay('Take screenshot: '+description+'.\nResult is available in the Annotations module.', 3000)
lm = slicer.app.layoutManager()
# switch on the type to get the requested window
widget = 0
if type == slicer.qMRMLScreenShotDialog.FullLayout:
# full layout
widget = lm.viewport()
elif type == slicer.qMRMLScreenShotDialog.ThreeD:
# just the 3D window
widget = lm.threeDWidget(0).threeDView()
elif type == slicer.qMRMLScreenShotDialog.Red:
# red slice window
widget = lm.sliceWidget("Red")
elif type == slicer.qMRMLScreenShotDialog.Yellow:
# yellow slice window
widget = lm.sliceWidget("Yellow")
elif type == slicer.qMRMLScreenShotDialog.Green:
# green slice window
widget = lm.sliceWidget("Green")
else:
# default to using the full window
widget = slicer.util.mainWindow()
# reset the type so that the node is set correctly
type = slicer.qMRMLScreenShotDialog.FullLayout
# grab and convert to vtk image data
qpixMap = qt.QPixmap().grabWidget(widget)
qimage = qpixMap.toImage()
imageData = vtk.vtkImageData()
slicer.qMRMLUtils().qImageToVtkImageData(qimage,imageData)
annotationLogic = slicer.modules.annotations.logic()
annotationLogic.CreateSnapShot(name, description, type, 1, imageData)
def run(self, inputVolume, outputVolume, imageThreshold, enableScreenshots=0):
"""
Run the actual algorithm
"""
if not self.isValidInputOutputData(inputVolume, outputVolume):
slicer.util.errorDisplay('Input volume is the same as output volume. Choose a different output volume.')
return False
logging.info('Processing started')
# Compute the thresholded output volume using the Threshold Scalar Volume CLI module
cliParams = {'InputVolume': inputVolume.GetID(), 'OutputVolume': outputVolume.GetID(), 'ThresholdValue' : imageThreshold, 'ThresholdType' : 'Above'}
cliNode = slicer.cli.run(slicer.modules.thresholdscalarvolume, None, cliParams, wait_for_completion=True)
# Capture screenshot
if enableScreenshots:
self.takeScreenshot('DistanceMapRegistrationTest-Start','MyScreenshot',-1)
logging.info('Processing completed')
return True
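# Minimal sketch of driving the logic above from the Slicer Python console,
# without the widget.  Assumptions for the example: a volume named "FA" is
# already loaded (as the test below arranges), and the threshold value is
# arbitrary.
def runDistanceMapRegistrationExample(imageThreshold=0.5):
  """Run the thresholding logic on the "FA" volume into a new output node."""
  inputNode = slicer.util.getNode("FA")
  outputNode = slicer.mrmlScene.AddNode(slicer.vtkMRMLScalarVolumeNode())
  outputNode.SetName("FA-thresholded")
  logic = DistanceMapRegistrationLogic()
  return logic.run(inputNode, outputNode, imageThreshold, enableScreenshots=0)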
class DistanceMapRegistrationTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_DistanceMapRegistration1()
def test_DistanceMapRegistration1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
self.delayDisplay("Starting the test")
#
# first, get some data
#
import urllib
downloads = (
('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd', slicer.util.loadVolume),
)
for url,name,loader in downloads:
filePath = slicer.app.temporaryPath + '/' + name
if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
logging.info('Requesting download %s from %s...\n' % (name, url))
urllib.urlretrieve(url, filePath)
if loader:
logging.info('Loading %s...' % (name,))
loader(filePath)
self.delayDisplay('Finished with download and loading')
volumeNode = slicer.util.getNode(pattern="FA")
logic = DistanceMapRegistrationLogic()
self.assertTrue( logic.hasImageData(volumeNode) )
self.delayDisplay('Test passed!')
|
KitwareMedical/AugmentedLabelRegistration
|
DistanceMapRegistration/DistanceMapRegistration.py
|
Python
|
apache-2.0
| 11,258
|
[
"VTK"
] |
003f84e2979ec26fa93cd9fd64939e9f54c0282746dec6e2afa1e044d83f275b
|
"""
:mod: DataManager
.. module: DataManager
:synopsis: DataManager links the functionalities of StorageElement and FileCatalog.
This module consists of DataManager and related classes.
"""
# # imports
from datetime import datetime, timedelta
import fnmatch
import os
import time
import errno
import six
# # from DIRAC
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities.Adler import fileAdler, compareAdler
from DIRAC.Core.Utilities.File import makeGuid, getSize
from DIRAC.Core.Utilities.List import randomize, breakListIntoChunks
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.MonitoringSystem.Client.DataOperationSender import DataOperationSender
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
# # RSCID
def _isOlderThan(stringTime, days):
"""Check if a time stamp is older than a given number of days"""
timeDelta = timedelta(days=days)
maxCTime = datetime.utcnow() - timeDelta
# st = time.strptime( stringTime, "%a %b %d %H:%M:%S %Y" )
# cTimeStruct = datetime( st[0], st[1], st[2], st[3], st[4], st[5], st[6], None )
cTimeStruct = stringTime
if cTimeStruct < maxCTime:
return True
return False
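# Usage sketch for _isOlderThan() with synthetic timestamps; the helper and
# its assertions are illustrative only and are not called anywhere:
def _exampleIsOlderThan():
    """Show _isOlderThan() with datetimes built from utcnow()."""
    tenDaysAgo = datetime.utcnow() - timedelta(days=10)
    assert _isOlderThan(tenDaysAgo, 7) is True
    assert _isOlderThan(datetime.utcnow(), 7) is False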
def _initialiseAccountingDict(operation, se, files):
"""create Accounting/Monitoring record"""
accountingDict = {}
accountingDict["OperationType"] = operation
result = getProxyInfo()
if not result["OK"]:
userName = "system"
else:
userName = result["Value"].get("username", "unknown")
accountingDict["User"] = userName
accountingDict["Protocol"] = "DataManager"
accountingDict["RegistrationTime"] = 0.0
accountingDict["RegistrationOK"] = 0
accountingDict["RegistrationTotal"] = 0
accountingDict["Destination"] = se
accountingDict["TransferTotal"] = files
accountingDict["TransferOK"] = files
accountingDict["TransferSize"] = files
accountingDict["TransferTime"] = 0.0
accountingDict["FinalStatus"] = "Successful"
accountingDict["Source"] = DIRAC.siteName()
return accountingDict
class DataManager(object):
"""
.. class:: DataManager
A DataManager is taking all the actions that impact or require the FileCatalog and the StorageElement together
"""
def __init__(self, catalogs=None, masterCatalogOnly=False, vo=False):
"""c'tor
:param self: self reference
:param catalogs: the list of catalog in which to perform the operations. This
list will be ignored if masterCatalogOnly is set to True
:param masterCatalogOnly: if set to True, the operations will be performed only on the master catalog.
The catalogs parameter will be ignored.
:param vo: the VO for which the DataManager is created, get VO from the current proxy if not specified
"""
self.log = gLogger.getSubLogger(self.__class__.__name__)
self.voName = vo
if catalogs is None:
catalogs = []
catalogsToUse = FileCatalog(vo=self.voName).getMasterCatalogNames()["Value"] if masterCatalogOnly else catalogs
self.fileCatalog = FileCatalog(catalogs=catalogsToUse, vo=self.voName)
self.accountingClient = None
self.resourceStatus = ResourceStatus()
self.ignoreMissingInFC = Operations(vo=self.voName).getValue("DataManagement/IgnoreMissingInFC", False)
self.useCatalogPFN = Operations(vo=self.voName).getValue("DataManagement/UseCatalogPFN", True)
self.dmsHelper = DMSHelpers(vo=vo)
self.registrationProtocol = self.dmsHelper.getRegistrationProtocols()
self.thirdPartyProtocols = self.dmsHelper.getThirdPartyProtocols()
self.dataOpSender = DataOperationSender()
def setAccountingClient(self, client):
"""Set Accounting Client instance"""
self.accountingClient = client
def __hasAccess(self, opType, path):
"""Check if we have permission to execute given operation on the given file (if exists) or its directory"""
if isinstance(path, six.string_types):
paths = [path]
else:
paths = list(path)
res = self.fileCatalog.hasAccess(paths, opType)
if not res["OK"]:
return res
result = {"Successful": list(), "Failed": list()}
for path in paths:
isAllowed = res["Value"]["Successful"].get(path, False)
if isAllowed:
result["Successful"].append(path)
else:
result["Failed"].append(path)
return S_OK(result)
##########################################################################
#
# These are the bulk removal methods
#
def cleanLogicalDirectory(self, lfnDir):
"""Clean the logical directory from the catalog and storage"""
log = self.log.getSubLogger("cleanLogicalDirectory")
if isinstance(lfnDir, six.string_types):
lfnDir = [lfnDir]
retDict = {"Successful": {}, "Failed": {}}
for folder in lfnDir:
res = self.__cleanDirectory(folder)
if not res["OK"]:
log.debug("Failed to clean directory.", "%s %s" % (folder, res["Message"]))
retDict["Failed"][folder] = res["Message"]
else:
log.debug("Successfully removed directory.", folder)
retDict["Successful"][folder] = res["Value"]
return S_OK(retDict)
def __cleanDirectory(self, folder):
"""delete all files from directory :folder: in FileCatalog and StorageElement
:param self: self reference
:param str folder: directory name
"""
log = self.log.getSubLogger("__cleanDirectory")
res = self.__hasAccess("removeDirectory", folder)
if not res["OK"]:
return res
if folder not in res["Value"]["Successful"]:
errStr = "Write access not permitted for this credential."
log.debug(errStr, folder)
return S_ERROR(errStr)
res = self.__getCatalogDirectoryContents([folder], includeDirectories=True)
if not res["OK"]:
return res
if not res["Value"]:
# folder is empty, just remove it and return
res = returnSingleResult(self.fileCatalog.removeDirectory(folder, recursive=True))
return res
# create a list of folders so that empty folders are also deleted
areDirs = self.fileCatalog.isDirectory(res["Value"])
if not areDirs["OK"]:
return areDirs
listOfFolders = [aDir for aDir in areDirs["Value"]["Successful"] if areDirs["Value"]["Successful"][aDir]]
for lfn in listOfFolders:
res["Value"].pop(lfn)
res = self.removeFile(res["Value"])
if not res["OK"]:
return res
for lfn, reason in res["Value"]["Failed"].items(): # can be an iterator
log.error("Failed to remove file found in the catalog", "%s %s" % (lfn, reason))
res = returnSingleResult(self.removeFile(["%s/dirac_directory" % folder]))
if not res["OK"]:
if not DErrno.cmpError(res, errno.ENOENT):
log.warn("Failed to delete dirac_directory placeholder file")
storageElements = gConfig.getValue("Resources/StorageElementGroups/SE_Cleaning_List", [])
failed = False
for storageElement in sorted(storageElements):
res = self.__removeStorageDirectory(folder, storageElement)
if not res["OK"]:
failed = True
if failed:
return S_ERROR("Failed to clean storage directory at all SEs")
for aFolder in sorted(listOfFolders, reverse=True):
res = returnSingleResult(self.fileCatalog.removeDirectory(aFolder, recursive=True))
log.verbose("Removed folder", "%s: %s" % (aFolder, res))
if not res["OK"]:
return res
res = returnSingleResult(self.fileCatalog.removeDirectory(folder, recursive=True))
if not res["OK"]:
return res
return S_OK()
def __removeStorageDirectory(self, directory, storageElement):
"""delete SE directory
:param self: self reference
:param str directory: folder to be removed
:param str storageElement: DIRAC SE name
"""
se = StorageElement(storageElement, vo=self.voName)
res = returnSingleResult(se.exists(directory))
log = self.log.getSubLogger("__removeStorageDirectory")
if not res["OK"]:
log.debug("Failed to obtain existance of directory", res["Message"])
return res
exists = res["Value"]
if not exists:
log.debug("The directory %s does not exist at %s " % (directory, storageElement))
return S_OK()
res = returnSingleResult(se.removeDirectory(directory, recursive=True))
if not res["OK"]:
log.debug("Failed to remove storage directory", res["Message"])
return res
log.debug(
"Successfully removed %d files from %s at %s" % (res["Value"]["FilesRemoved"], directory, storageElement)
)
return S_OK()
def __getCatalogDirectoryContents(self, directories, includeDirectories=False):
"""ls recursively all files in directories
:param self: self reference
:param list directories: folder names
:param bool includeDirectories: if True includes directories in the return dictionary
:return: S_OK with dict of LFNs and their attribute dictionary
"""
log = self.log.getSubLogger("__getCatalogDirectoryContents")
log.debug("Obtaining the catalog contents for %d directories:" % len(directories))
activeDirs = directories
allFiles = {}
while len(activeDirs) > 0:
currentDir = activeDirs[0]
res = returnSingleResult(self.fileCatalog.listDirectory(currentDir, verbose=True))
activeDirs.remove(currentDir)
if not res["OK"]:
log.debug("Problem getting the %s directory content" % currentDir, res["Message"])
else:
dirContents = res["Value"]
activeDirs.extend(dirContents["SubDirs"])
allFiles.update(dirContents["Files"])
if includeDirectories:
allFiles.update(dirContents["SubDirs"])
log.debug("Found %d files" % len(allFiles))
return S_OK(allFiles)
def getReplicasFromDirectory(self, directory):
"""get all replicas from a given directory
:param self: self reference
:param mixed directory: list of directories or one directory
"""
if isinstance(directory, six.string_types):
directories = [directory]
else:
directories = directory
res = self.__getCatalogDirectoryContents(directories)
if not res["OK"]:
return res
allReplicas = dict((lfn, metadata["Replicas"]) for lfn, metadata in res["Value"].items()) # can be an iterator
return S_OK(allReplicas)
def getFilesFromDirectory(self, directory, days=0, wildcard="*"):
"""get all files from :directory: older than :days: days matching to :wildcard:
:param self: self reference
:param mixed directory: list of directories or directory name
:param int days: ctime days
:param str wildcard: pattern to match
"""
if isinstance(directory, six.string_types):
directories = [directory]
else:
directories = directory
log = self.log.getSubLogger("getFilesFromDirectory")
log.debug("Obtaining the files older than %d days in %d directories:" % (days, len(directories)))
for folder in directories:
log.debug(folder)
activeDirs = directories
allFiles = []
while len(activeDirs) > 0:
currentDir = activeDirs[0]
# We only need the metadata (verbose) if a limit date is given
res = returnSingleResult(self.fileCatalog.listDirectory(currentDir, verbose=(days != 0)))
activeDirs.remove(currentDir)
if not res["OK"]:
log.debug("Error retrieving directory contents", "%s %s" % (currentDir, res["Message"]))
else:
dirContents = res["Value"]
subdirs = dirContents["SubDirs"]
files = dirContents["Files"]
log.debug("%s: %d files, %d sub-directories" % (currentDir, len(files), len(subdirs)))
for subdir in subdirs:
if (not days) or _isOlderThan(subdirs[subdir]["CreationDate"], days):
if subdir[0] != "/":
subdir = currentDir + "/" + subdir
activeDirs.append(subdir)
for fileName in files:
fileInfo = files[fileName]
fileInfo = fileInfo.get("Metadata", fileInfo)
if (not days) or not fileInfo.get("CreationDate") or _isOlderThan(fileInfo["CreationDate"], days):
if wildcard == "*" or fnmatch.fnmatch(fileName, wildcard):
fileName = fileInfo.get("LFN", fileName)
allFiles.append(fileName)
return S_OK(allFiles)
##########################################################################
#
# These are the data transfer methods
#
def getFile(self, lfn, destinationDir="", sourceSE=None):
"""Get a local copy of a LFN from Storage Elements.
'lfn' is the logical file name for the desired file
"""
log = self.log.getSubLogger("getFile")
fileMetadata = {}
if isinstance(lfn, list):
lfns = lfn
elif isinstance(lfn, six.string_types):
lfns = [lfn]
else:
errStr = "Supplied lfn must be string or list of strings."
log.debug(errStr)
return S_ERROR(errStr)
log.debug("Attempting to get %s files." % len(lfns))
res = self.getActiveReplicas(lfns, getUrl=False)
if not res["OK"]:
return res
failed = res["Value"]["Failed"]
lfnReplicas = res["Value"]["Successful"]
# If some files have replicas, check their metadata
if lfnReplicas:
res = self.fileCatalog.getFileMetadata(list(lfnReplicas))
if not res["OK"]:
return res
failed.update(res["Value"]["Failed"])
fileMetadata = res["Value"]["Successful"]
successful = {}
for lfn in fileMetadata:
res = self.__getFile(lfn, lfnReplicas[lfn], fileMetadata[lfn], destinationDir, sourceSE=sourceSE)
if not res["OK"]:
failed[lfn] = res["Message"]
else:
successful[lfn] = res["Value"]
return S_OK({"Successful": successful, "Failed": failed})
def __getFile(self, lfn, replicas, metadata, destinationDir, sourceSE=None):
"""
Method actually doing the job to get a file from storage
"""
log = self.log.getSubLogger("__getFile")
if not replicas:
errStr = "No accessible replicas found"
log.debug(errStr)
return S_ERROR(errStr)
# Determine the best replicas
errTuple = ("No SE", "found")
if sourceSE is None:
sortedSEs = self._getSEProximity(replicas)
else:
if sourceSE not in replicas:
return S_ERROR("No replica at %s" % sourceSE)
else:
sortedSEs = [sourceSE]
for storageElementName in sortedSEs:
se = StorageElement(storageElementName, vo=self.voName)
res = returnSingleResult(se.getFile(lfn, localPath=os.path.realpath(destinationDir)))
if not res["OK"]:
errTuple = (
"Error getting file from storage:",
"%s from %s, %s" % (lfn, storageElementName, res["Message"]),
)
errToReturn = res
else:
localFile = os.path.realpath(os.path.join(destinationDir, os.path.basename(lfn)))
localAdler = fileAdler(localFile)
if metadata["Size"] != res["Value"]:
errTuple = (
"Mismatch of sizes:",
"downloaded = %d, catalog = %d" % (res["Value"], metadata["Size"]),
)
errToReturn = S_ERROR(DErrno.EFILESIZE, errTuple[1])
elif (metadata["Checksum"]) and (not compareAdler(metadata["Checksum"], localAdler)):
errTuple = (
"Mismatch of checksums:",
"downloaded = %s, catalog = %s" % (localAdler, metadata["Checksum"]),
)
errToReturn = S_ERROR(DErrno.EBADCKS, errTuple[1])
else:
return S_OK(localFile)
# If we get here, there was an error; log it at debug level
log.debug(errTuple[0], errTuple[1])
log.verbose("Failed to get local copy from any replicas:", "\n%s %s" % errTuple)
return errToReturn
def _getSEProximity(self, replicas):
"""get SE proximity"""
siteName = DIRAC.siteName()
self.__filterTapeSEs(replicas)
localSEs = [se for se in self.dmsHelper.getSEsAtSite(siteName).get("Value", []) if se in replicas]
countrySEs = []
countryCode = str(siteName).split(".")[-1]
res = self.dmsHelper.getSEsAtCountry(countryCode)
if res["OK"]:
countrySEs = [se for se in res["Value"] if se in replicas and se not in localSEs]
sortedSEs = randomize(localSEs) + randomize(countrySEs)
sortedSEs += randomize(se for se in replicas if se not in sortedSEs)
return sortedSEs
def putAndRegister(self, lfn, fileName, diracSE, guid=None, path=None, checksum=None, overwrite=False):
"""Put a local file to a Storage Element and register in the File Catalogues
'lfn' is the file LFN
'fileName' is the full path to the local file
'diracSE' is the Storage Element to which to put the file
'guid' is the guid with which the file is to be registered (if not provided will be generated)
'path' is the path on the storage where the file will be put (if not provided the LFN will be used)
'overwrite' removes file from the file catalogue and SE before attempting upload
"""
res = self.__hasAccess("addFile", lfn)
if not res["OK"]:
return res
log = self.log.getSubLogger("putAndRegister")
if lfn not in res["Value"]["Successful"]:
errStr = "Write access not permitted for this credential."
log.debug(errStr, lfn)
return S_ERROR(errStr)
# Check that the local file exists
if not os.path.exists(fileName):
errStr = "Supplied file does not exist."
log.debug(errStr, fileName)
return S_ERROR(errStr)
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname(lfn)
# Obtain the size of the local file
size = getSize(fileName)
if size == 0:
errStr = "Supplied file is zero size."
log.debug(errStr, fileName)
return S_ERROR(errStr)
# If the GUID is not given, generate it here
if not guid:
guid = makeGuid(fileName)
if not checksum:
log.debug("Checksum information not provided. Calculating adler32.")
checksum = fileAdler(fileName)
# Try once more
if not checksum:
log.debug("Checksum calculation failed, try again")
checksum = fileAdler(fileName)
if checksum:
log.debug("Checksum calculated to be %s." % checksum)
else:
return S_ERROR(DErrno.EBADCKS, "Unable to calculate checksum")
res = self.fileCatalog.exists({lfn: guid})
if not res["OK"]:
errStr = "Completely failed to determine existence of destination LFN."
log.debug(errStr, lfn)
return res
if lfn not in res["Value"]["Successful"]:
errStr = "Failed to determine existence of destination LFN."
log.debug(errStr, lfn)
return S_ERROR(errStr)
if res["Value"]["Successful"][lfn]:
if res["Value"]["Successful"][lfn] == lfn:
if overwrite:
resRm = self.removeFile(lfn, force=True)
if not resRm["OK"]:
errStr = "Failed to prepare file for overwrite"
log.debug(errStr, lfn)
return resRm
if lfn not in resRm["Value"]["Successful"]:
errStr = "Failed to either delete file or LFN"
log.debug(errStr, lfn)
return S_ERROR("%s %s" % (errStr, lfn))
else:
errStr = "The supplied LFN already exists in the File Catalog."
log.debug(errStr, lfn)
return S_ERROR("%s %s" % (errStr, res["Value"]["Successful"][lfn]))
else:
# If the returned LFN is different, this is the name of a file
# with the same GUID
errStr = "This file GUID already exists for another file"
log.debug(errStr, res["Value"]["Successful"][lfn])
return S_ERROR("%s %s" % (errStr, res["Value"]["Successful"][lfn]))
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement(diracSE, vo=self.voName)
res = storageElement.isValid()
if not res["OK"]:
errStr = "The storage element is not currently valid."
log.verbose(errStr, "%s %s" % (diracSE, res["Message"]))
return S_ERROR("%s %s" % (errStr, res["Message"]))
fileDict = {lfn: fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
startTime = Time.dateTime()
transferStartTime = time.time()
res = returnSingleResult(storageElement.putFile(fileDict))
putTime = time.time() - transferStartTime
accountingDict = _initialiseAccountingDict("putAndRegister", diracSE, 1)
accountingDict["TransferSize"] = size
accountingDict["TransferTime"] = putTime
if not res["OK"]:
# We don't account it as a transfer failure if the error is a permission (EACCES) one
if not DErrno.cmpError(res, errno.EACCES):
accountingDict["TransferOK"] = 0
accountingDict["FinalStatus"] = "Failed"
sendingResult = self.dataOpSender.sendData(
accountingDict, commitFlag=True, startTime=startTime, endTime=Time.dateTime()
)
log.verbose("Committing data operation")
if not sendingResult["OK"]:
log.error("Couldn't commit data operation", sendingResult["Message"])
return sendingResult
log.verbose("Done committing")
log.debug("putAndRegister: Sending took %.1f seconds" % (time.time() - transferStartTime))
errStr = "Failed to put file to Storage Element."
log.debug(errStr, "%s: %s" % (fileName, res["Message"]))
return S_ERROR("%s %s" % (errStr, res["Message"]))
successful[lfn] = {"put": putTime}
###########################################################
# Perform the registration here
destinationSE = storageElement.storageElementName()
res = returnSingleResult(storageElement.getURL(lfn, protocol=self.registrationProtocol))
if not res["OK"]:
errStr = "Failed to generate destination PFN."
log.debug(errStr, res["Message"])
return S_ERROR("%s %s" % (errStr, res["Message"]))
destUrl = res["Value"]
fileTuple = (lfn, destUrl, size, destinationSE, guid, checksum)
registerDict = {
"LFN": lfn,
"PFN": destUrl,
"Size": size,
"TargetSE": destinationSE,
"GUID": guid,
"Addler": checksum,
}
startTime = time.time()
res = self.registerFile(fileTuple)
registerTime = time.time() - startTime
accountingDict["RegistrationTotal"] = 1
accountingDict["RegistrationTime"] = registerTime
if not res["OK"]:
errStr = "Completely failed to register file."
log.debug(errStr, res["Message"])
failed[lfn] = {"register": registerDict}
accountingDict["FinalStatus"] = "Failed"
elif lfn in res["Value"]["Failed"]:
errStr = "Failed to register file."
log.debug(errStr, "%s %s" % (lfn, res["Value"]["Failed"][lfn]))
accountingDict["FinalStatus"] = "Failed"
failed[lfn] = {"register": registerDict}
else:
successful[lfn]["register"] = registerTime
accountingDict["RegistrationOK"] = 1
# Send to Monitoring/Accounting
startTime = time.time()
sendingResult = self.dataOpSender.sendData(accountingDict, commitFlag=True)
log.verbose("Committing data operation")
if not sendingResult["OK"]:
log.error("Couldn't commit data operation", sendingResult["Message"])
return sendingResult
log.verbose("Done committing")
log.debug("putAndRegister: Sending took %.1f seconds" % (time.time() - startTime))
return S_OK({"Successful": successful, "Failed": failed})
def replicateAndRegister(self, lfn, destSE, sourceSE="", destPath="", localCache="", catalog=""):
"""Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if it is to differ from the LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
log = self.log.getSubLogger("replicateAndRegister")
successful = {}
failed = {}
log.debug("Attempting to replicate %s to %s." % (lfn, destSE))
startReplication = time.time()
res = self.__replicate(lfn, destSE, sourceSE, destPath, localCache)
replicationTime = time.time() - startReplication
if not res["OK"]:
errStr = "Completely failed to replicate file."
log.debug(errStr, res["Message"])
return S_ERROR("%s %s" % (errStr, res["Message"]))
if not res["Value"]:
# The file was already present at the destination SE
log.debug("%s already present at %s." % (lfn, destSE))
successful[lfn] = {"replicate": 0, "register": 0}
resDict = {"Successful": successful, "Failed": failed}
return S_OK(resDict)
successful[lfn] = {"replicate": replicationTime}
destPfn = res["Value"]["DestPfn"]
destSE = res["Value"]["DestSE"]
log.debug("Attempting to register %s at %s." % (destPfn, destSE))
replicaTuple = (lfn, destPfn, destSE)
startRegistration = time.time()
res = self.registerReplica(replicaTuple, catalog=catalog)
registrationTime = time.time() - startRegistration
if not res["OK"]:
# Need to return to the client that the file was replicated but not
# registered
errStr = "Completely failed to register replica."
log.debug(errStr, res["Message"])
failed[lfn] = {"Registration": {"LFN": lfn, "TargetSE": destSE, "PFN": destPfn}}
else:
if lfn in res["Value"]["Successful"]:
log.debug("Successfully registered replica.")
successful[lfn]["register"] = registrationTime
else:
errStr = "Failed to register replica."
log.debug(errStr, res["Value"]["Failed"][lfn])
failed[lfn] = {"Registration": {"LFN": lfn, "TargetSE": destSE, "PFN": destPfn}}
return S_OK({"Successful": successful, "Failed": failed})
def replicate(self, lfn, destSE, sourceSE="", destPath="", localCache=""):
"""Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if it is to differ from the LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
log = self.log.getSubLogger("replicate")
log.debug("Attempting to replicate %s to %s." % (lfn, destSE))
res = self.__replicate(lfn, destSE, sourceSE, destPath, localCache)
if not res["OK"]:
errStr = "Replication failed."
log.debug(errStr, "%s %s" % (lfn, destSE))
return res
if not res["Value"]:
# The file was already present at the destination SE
log.debug("%s already present at %s." % (lfn, destSE))
return res
return S_OK(lfn)
def __getSERealName(self, storageName):
"""get the base name of an SE possibly defined as an alias"""
rootConfigPath = "/Resources/StorageElements"
configPath = "%s/%s" % (rootConfigPath, storageName)
res = gConfig.getOptions(configPath)
if not res["OK"]:
errStr = "Failed to get storage options"
return S_ERROR(errStr)
if not res["Value"]:
errStr = "Supplied storage doesn't exist."
return S_ERROR(errStr)
if "Alias" in res["Value"]:
configPath += "/Alias"
aliasName = gConfig.getValue(configPath)
result = self.__getSERealName(aliasName)
if not result["OK"]:
return result
resolvedName = result["Value"]
else:
resolvedName = storageName
return S_OK(resolvedName)
def __isSEInList(self, seName, seList):
"""Check whether an SE is in a list of SEs... All could be aliases"""
seSet = set()
for se in seList:
res = self.__getSERealName(se)
if res["OK"]:
seSet.add(res["Value"])
return self.__getSERealName(seName).get("Value") in seSet
def __replicate(self, lfn, destSEName, sourceSEName="", destPath="", localCache=""):
"""Replicate a LFN to a destination SE.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if it is to differ from the LHCb convention
'localCache' is a local directory used as a get-and-put fallback when a third-party transfer cannot be done
"""
log = self.log.getSubLogger("__replicate")
###########################################################
# Check that we have write permissions to this directory.
res = self.__hasAccess("addReplica", lfn)
if not res["OK"]:
return res
if lfn not in res["Value"]["Successful"]:
errStr = "__replicate: Write access not permitted for this credential."
log.debug(errStr, lfn)
return S_ERROR(errStr)
# Check that the destination storage element is sane and resolve its name
log.debug("Verifying destination StorageElement validity (%s)." % (destSEName))
destStorageElement = StorageElement(destSEName, vo=self.voName)
res = destStorageElement.isValid()
if not res["OK"]:
errStr = "The storage element is not currently valid."
log.verbose(errStr, "%s %s" % (destSEName, res["Message"]))
return S_ERROR("%s %s" % (errStr, res["Message"]))
# Get the real name of the SE
destSEName = destStorageElement.storageElementName()
###########################################################
# Check whether the destination storage element is banned
log.verbose("Determining whether %s ( destination ) is Write-banned." % destSEName)
if not destStorageElement.status()["Write"]:
infoStr = "Supplied destination Storage Element is not currently allowed for Write."
log.debug(infoStr, destSEName)
return S_ERROR(infoStr)
# Get the LFN replicas from the file catalog
log.debug("Attempting to obtain replicas for %s." % (lfn))
res = returnSingleResult(self.getReplicas(lfn, getUrl=False))
if not res["OK"]:
errStr = "Failed to get replicas for LFN."
log.debug(errStr, "%s %s" % (lfn, res["Message"]))
return S_ERROR("%s %s" % (errStr, res["Message"]))
log.debug("Successfully obtained replicas for LFN.")
lfnReplicas = res["Value"]
###########################################################
# If the file catalog size is zero fail the transfer
log.debug("Attempting to obtain size for %s." % lfn)
res = returnSingleResult(self.fileCatalog.getFileSize(lfn))
if not res["OK"]:
errStr = "Failed to get size for LFN."
log.debug(errStr, "%s %s" % (lfn, res["Message"]))
return S_ERROR("%s %s" % (errStr, res["Message"]))
catalogSize = res["Value"]
if catalogSize == 0:
errStr = "Registered file size is 0."
log.debug(errStr, lfn)
return S_ERROR(errStr)
log.debug("File size determined to be %s." % catalogSize)
###########################################################
# If the LFN already exists at the destination we have nothing to do
if self.__isSEInList(destSEName, lfnReplicas):
log.debug("__replicate: LFN is already registered at %s." % destSEName)
return S_OK()
###########################################################
# If the source is specified, check that it is in the replicas
if sourceSEName:
log.debug("Determining whether source Storage Element specified is sane.")
if sourceSEName not in lfnReplicas:
errStr = "LFN does not exist at supplied source SE."
log.error(errStr, "%s %s" % (lfn, sourceSEName))
return S_ERROR(errStr)
# If sourceSE is specified, then we consider this one only, otherwise
# we consider them all
possibleSourceSEs = [sourceSEName] if sourceSEName else lfnReplicas
# We sort the possibleSourceSEs with the SEs that are on the same site as the destination first
# (reverse=True because True > False)
possibleSourceSEs = sorted(
possibleSourceSEs,
key=lambda x: self.dmsHelper.isSameSiteSE(x, destSEName).get("Value", False),
reverse=True,
)
# In case we manage to find SEs that would work as a source but cannot negotiate a protocol,
# we will do a get and put using one of these sane SEs
possibleIntermediateSEs = []
# Take into account the destination path
if destPath:
destPath = "%s/%s" % (destPath, os.path.basename(lfn))
else:
destPath = lfn
for candidateSEName in possibleSourceSEs:
log.debug("Consider %s as a source" % candidateSEName)
# Check that the candidate is active
if not self.__checkSEStatus(candidateSEName, status="Read"):
log.debug("%s is currently not allowed as a source." % candidateSEName)
continue
else:
log.debug("%s is available for use." % candidateSEName)
candidateSE = StorageElement(candidateSEName, vo=self.voName)
# Check that the SE is valid
res = candidateSE.isValid()
if not res["OK"]:
log.verbose("The storage element is not currently valid.", "%s %s" % (candidateSEName, res["Message"]))
continue
else:
log.debug("The storage is currently valid", candidateSEName)
# Check that the file size corresponds to the one in the FC
res = returnSingleResult(candidateSE.getFileSize(lfn))
if not res["OK"]:
log.debug("could not get fileSize on %s" % candidateSEName, res["Message"])
continue
seFileSize = res["Value"]
if seFileSize != catalogSize:
log.debug("Catalog size and physical file size mismatch.", "%s %s" % (catalogSize, seFileSize))
continue
else:
log.debug("Catalog size and physical size match")
res = destStorageElement.negociateProtocolWithOtherSE(candidateSE, protocols=self.thirdPartyProtocols)
if not res["OK"]:
log.debug("Error negotiating replication protocol", res["Message"])
continue
replicationProtocols = res["Value"]
if not replicationProtocols:
possibleIntermediateSEs.append(candidateSE)
log.debug("No protocol suitable for replication found")
continue
log.debug("Found common protocols", replicationProtocols)
# THIS WOULD NOT WORK IF PROTO == file !!
# Why did I write that comment ?!
# We try the protocols one by one
# That obviously assumes that there is an overlap and not only
# a compatibility between the output protocols of the source
# and the input protocols of the destination.
# But that is the only way to make sure we are not replicating
# over ourselves.
for compatibleProtocol in replicationProtocols:
# Compare the urls to make sure we are not overwriting
res = returnSingleResult(candidateSE.getURL(lfn, protocol=compatibleProtocol))
if not res["OK"]:
log.debug("Cannot get sourceURL", res["Message"])
continue
sourceURL = res["Value"]
destURL = ""
res = returnSingleResult(destStorageElement.getURL(destPath, protocol=compatibleProtocol))
if not res["OK"]:
# for some protocols, in particular srm
# you might get an error because the file does not exist
# which is exactly what we want
# in that case, we just keep going with the comparison
# since destURL will be an empty string
if not DErrno.cmpError(res, errno.ENOENT):
log.debug("Cannot get destURL", res["Message"])
continue
log.debug("File does not exist: Expected error for TargetSE !!")
else:
destURL = res["Value"]
if sourceURL == destURL:
log.debug("Same source and destination, give up")
continue
# Attempt the transfer
res = returnSingleResult(
destStorageElement.replicateFile(
{destPath: sourceURL}, sourceSize=catalogSize, inputProtocol=compatibleProtocol
)
)
if not res["OK"]:
log.debug("Replication failed", "%s from %s to %s." % (lfn, candidateSEName, destSEName))
continue
log.debug("Replication successful.", res["Value"])
res = returnSingleResult(destStorageElement.getURL(destPath, protocol=self.registrationProtocol))
if not res["OK"]:
log.debug("Error getting the registration URL", res["Message"])
# it's maybe pointless to try the other candidateSEs...
continue
registrationURL = res["Value"]
return S_OK({"DestSE": destSEName, "DestPfn": registrationURL})
# If we are here, that means that we could not make a third party transfer.
# Check if we have some sane SEs from which we could do a get/put
localDir = os.path.realpath(localCache if localCache else ".")
localFile = os.path.join(localDir, os.path.basename(lfn))
log.debug("Will try intermediate transfer from %s sources" % len(possibleIntermediateSEs))
for candidateSE in possibleIntermediateSEs:
res = returnSingleResult(candidateSE.getFile(lfn, localPath=localDir))
if not res["OK"]:
log.debug("Error getting the file from %s" % candidateSE.name, res["Message"])
continue
res = returnSingleResult(destStorageElement.putFile({destPath: localFile}))
# Remove the local file whatever happened
try:
os.remove(localFile)
except OSError as e:
log.error("Error removing local file", "%s %s" % (localFile, e))
if not res["OK"]:
log.debug("Error putting file coming from %s" % candidateSE.name, res["Message"])
# if the put is the problem, it's maybe pointless to try the other
# candidateSEs...
continue
# get URL with default protocol to return it
res = returnSingleResult(destStorageElement.getURL(destPath, protocol=self.registrationProtocol))
if not res["OK"]:
log.debug("Error getting the registration URL", res["Message"])
# it's maybe pointless to try the other candidateSEs...
continue
registrationURL = res["Value"]
return S_OK({"DestSE": destSEName, "DestPfn": registrationURL})
# If here, we are really doomed
errStr = "Failed to replicate with all sources."
log.debug(errStr, lfn)
return S_ERROR(errStr)
###################################################################
#
# These are the file catalog write methods
#
def registerFile(self, fileTuple, catalog=""):
"""Register a file or a list of files
:param self: self reference
:param tuple fileTuple: (lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum )
:param str catalog: catalog name
"""
log = self.log.getSubLogger("registerFile")
if isinstance(fileTuple, (list, set)):
fileTuples = fileTuple
elif isinstance(fileTuple, tuple):
fileTuples = [fileTuple]
for fileTuple in fileTuples:
if not isinstance(fileTuple, tuple):
errStr = "Supplied file info must be tuple or list of tuples."
log.debug(errStr)
return S_ERROR(errStr)
if not fileTuples:
return S_OK({"Successful": [], "Failed": {}})
log.debug("Attempting to register %s files." % len(fileTuples))
res = self.__registerFile(fileTuples, catalog)
if not res["OK"]:
errStr = "Completely failed to register files."
log.debug(errStr, res["Message"])
return res
return res
def __registerFile(self, fileTuples, catalog):
"""register file to catalog"""
fileDict = {}
for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuples:
fileDict[lfn] = {
"PFN": physicalFile,
"Size": fileSize,
"SE": storageElementName,
"GUID": fileGuid,
"Checksum": checksum,
}
if catalog:
fileCatalog = FileCatalog(catalog, vo=self.voName)
if not fileCatalog.isOK():
return S_ERROR("Can't get FileCatalog %s" % catalog)
else:
fileCatalog = self.fileCatalog
res = fileCatalog.addFile(fileDict)
if not res["OK"]:
errStr = "Completely failed to register files."
self.log.getSubLogger("__registerFile").debug(errStr, res["Message"])
return res
def registerReplica(self, replicaTuple, catalog=""):
"""Register a replica (or list of) supplied in the replicaTuples.
'replicaTuple' is a tuple or list of tuples of the form (lfn,pfn,se)
"""
log = self.log.getSubLogger("registerReplica")
if isinstance(replicaTuple, (list, set)):
replicaTuples = replicaTuple
elif isinstance(replicaTuple, tuple):
replicaTuples = [replicaTuple]
for replicaTuple in replicaTuples:
if not isinstance(replicaTuple, tuple):
errStr = "Supplied file info must be tuple or list of tuples."
log.debug(errStr)
return S_ERROR(errStr)
if not replicaTuples:
return S_OK({"Successful": [], "Failed": {}})
log.debug("Attempting to register %s replicas." % len(replicaTuples))
res = self.__registerReplica(replicaTuples, catalog)
if not res["OK"]:
errStr = "Completely failed to register replicas."
log.debug(errStr, res["Message"])
return res
return res
def __registerReplica(self, replicaTuples, catalog):
"""register replica to catalogue"""
log = self.log.getSubLogger("__registerReplica")
seDict = {}
for lfn, url, storageElementName in replicaTuples:
seDict.setdefault(storageElementName, []).append((lfn, url))
failed = {}
replicaTuples = []
for storageElementName, replicaTuple in seDict.items(): # can be an iterator
destStorageElement = StorageElement(storageElementName, vo=self.voName)
res = destStorageElement.isValid()
if not res["OK"]:
errStr = "The storage element is not currently valid."
log.verbose(errStr, "%s %s" % (storageElementName, res["Message"]))
for lfn, url in replicaTuple:
failed[lfn] = errStr
else:
storageElementName = destStorageElement.storageElementName()
for lfn, url in replicaTuple:
res = returnSingleResult(destStorageElement.getURL(lfn, protocol=self.registrationProtocol))
if not res["OK"]:
failed[lfn] = res["Message"]
else:
replicaTuple = (lfn, res["Value"], storageElementName, False)
replicaTuples.append(replicaTuple)
log.debug("Successfully resolved %s replicas for registration." % len(replicaTuples))
# HACK!
replicaDict = {}
for lfn, url, se, _master in replicaTuples:
replicaDict[lfn] = {"SE": se, "PFN": url}
if catalog:
fileCatalog = FileCatalog(catalog, vo=self.voName)
res = fileCatalog.addReplica(replicaDict)
else:
res = self.fileCatalog.addReplica(replicaDict)
if not res["OK"]:
errStr = "Completely failed to register replicas."
log.debug(errStr, res["Message"])
return S_ERROR("%s %s" % (errStr, res["Message"]))
failed.update(res["Value"]["Failed"])
successful = res["Value"]["Successful"]
resDict = {"Successful": successful, "Failed": failed}
return S_OK(resDict)
###################################################################
#
# These are the removal methods for physical and catalogue removal
#
def removeFile(self, lfn, force=None):
"""Remove the file (all replicas) from Storage Elements and file catalogue
'lfn' is the file to be removed
"""
log = self.log.getSubLogger("removeFile")
if not lfn:
return S_OK({"Successful": {}, "Failed": {}})
if force is None:
force = self.ignoreMissingInFC
if isinstance(lfn, (list, dict, set, tuple)):
lfns = list(lfn)
else:
lfns = [lfn]
for lfn in lfns:
if not isinstance(lfn, six.string_types):
errStr = "Supplied lfns must be string or list of strings."
log.debug(errStr)
return S_ERROR(errStr)
successful = {}
failed = {}
if not lfns:
return S_OK({"Successful": successful, "Failed": failed})
# First check if the file exists in the FC
res = self.fileCatalog.exists(lfns)
if not res["OK"]:
return res
success = res["Value"]["Successful"]
lfns = [lfn for lfn in success if success[lfn]]
if force:
# Files that don't exist are removed successfully
successful = dict.fromkeys((lfn for lfn in success if not success[lfn]), True)
else:
failed = dict.fromkeys((lfn for lfn in success if not success[lfn]), "No such file or directory")
# Check that we have write permissions to this directory and to the file.
if lfns:
res = self.__hasAccess("removeFile", lfns)
if not res["OK"]:
return res
if res["Value"]["Failed"]:
errStr = "Write access not permitted for this credential."
log.debug(errStr, "for %d files" % len(res["Value"]["Failed"]))
failed.update(dict.fromkeys(res["Value"]["Failed"], errStr))
lfns = res["Value"]["Successful"]
if lfns:
log.debug("Attempting to remove %d files from Storage and Catalogue. Get replicas first" % len(lfns))
res = self.fileCatalog.getReplicas(lfns, allStatus=True)
if not res["OK"]:
errStr = "DataManager.removeFile: Completely failed to get replicas for lfns."
log.debug(errStr, res["Message"])
return res
lfnDict = res["Value"]["Successful"]
for lfn, reason in res["Value"]["Failed"].items(): # can be an iterator
# Ignore files missing in FC if force is set
if reason == "No such file or directory" and force:
successful[lfn] = True
elif reason == "File has zero replicas":
lfnDict[lfn] = {}
else:
failed[lfn] = reason
res = self.__removeFile(lfnDict)
if not res["OK"]:
# This can never happen
return res
failed.update(res["Value"]["Failed"])
successful.update(res["Value"]["Successful"])
self.dataOpSender.concludeSending()
return S_OK({"Successful": successful, "Failed": failed})
def __removeFile(self, lfnDict):
"""remove file"""
storageElementDict = {}
# # sorted and reversed
for lfn, repDict in sorted(lfnDict.items(), reverse=True):
for se in repDict:
storageElementDict.setdefault(se, []).append(lfn)
failed = {}
successful = {}
for storageElementName in sorted(storageElementDict):
lfns = storageElementDict[storageElementName]
res = self.__removeReplica(storageElementName, lfns, replicaDict=lfnDict)
if not res["OK"]:
errStr = res["Message"]
for lfn in lfns:
failed[lfn] = failed.setdefault(lfn, "") + " %s" % errStr
else:
for lfn, errStr in res["Value"]["Failed"].items(): # can be an iterator
failed[lfn] = failed.setdefault(lfn, "") + " %s" % errStr
completelyRemovedFiles = set(lfnDict) - set(failed)
if completelyRemovedFiles:
res = self.fileCatalog.removeFile(list(completelyRemovedFiles))
if not res["OK"]:
failed.update(
dict.fromkeys(completelyRemovedFiles, "Failed to remove file from the catalog: %s" % res["Message"])
)
else:
failed.update(res["Value"]["Failed"])
successful = res["Value"]["Successful"]
return S_OK({"Successful": successful, "Failed": failed})
def removeReplica(self, storageElementName, lfn):
"""Remove replica at the supplied Storage Element from Storage Element then file catalogue
'storageElementName' is the storage where the file is to be removed
'lfn' is the file to be removed
"""
log = self.log.getSubLogger("removeReplica")
if isinstance(lfn, (list, dict, set, tuple)):
lfns = set(lfn)
else:
lfns = set([lfn])
for lfn in lfns:
if not isinstance(lfn, six.string_types):
errStr = "Supplied lfns must be string or list of strings."
log.debug(errStr)
return S_ERROR(errStr)
successful = {}
failed = {}
if not lfns:
return S_OK({"Successful": successful, "Failed": failed})
# Check that we have write permissions to this file.
res = self.__hasAccess("removeReplica", lfns)
if not res["OK"]:
log.debug("Error in __verifyWritePermisison", res["Message"])
return res
if res["Value"]["Failed"]:
errStr = "Write access not permitted for this credential."
log.debug(errStr, "for %d files" % len(res["Value"]["Failed"]))
failed.update(dict.fromkeys(res["Value"]["Failed"], errStr))
lfns -= set(res["Value"]["Failed"])
if not lfns:
log.debug("Permission denied for all files")
else:
log.debug("Will remove %s lfns at %s." % (len(lfns), storageElementName))
res = self.fileCatalog.getReplicas(list(lfns), allStatus=True)
if not res["OK"]:
errStr = "Completely failed to get replicas for lfns."
log.debug(errStr, res["Message"])
return res
failed.update(res["Value"]["Failed"])
replicaDict = res["Value"]["Successful"]
lfnsToRemove = set()
for lfn, repDict in replicaDict.items(): # can be an iterator
if storageElementName not in repDict:
# The file doesn't exist at the storage element, so we don't have to
# remove it
successful[lfn] = True
elif len(repDict) == 1:
# The file has only a single replica, so we don't remove it
log.debug(
"The replica you are trying to remove is the only one.", "%s @ %s" % (lfn, storageElementName)
)
failed[lfn] = "Failed to remove sole replica"
else:
lfnsToRemove.add(lfn)
if lfnsToRemove:
res = self.__removeReplica(storageElementName, lfnsToRemove, replicaDict=replicaDict)
if not res["OK"]:
log.debug("Failed in __removeReplica", res["Message"])
return res
failed.update(res["Value"]["Failed"])
successful.update(res["Value"]["Successful"])
self.dataOpSender.concludeSending()
return S_OK({"Successful": successful, "Failed": failed})
def __removeReplica(self, storageElementName, lfns, replicaDict=None):
"""remove replica
Remove the replica from the storageElement, and then from the catalog
:param storageElementName : The name of the storage Element
:param lfns : list of lfn we want to remove
:param replicaDict : cache of fc.getReplicas(lfns) : { lfn { se : catalog url } }
"""
log = self.log.getSubLogger("__removeReplica")
failed = {}
successful = {}
replicaDict = replicaDict if replicaDict else {}
lfnsToRemove = set()
for lfn in lfns:
res = self.__hasAccess("removeReplica", lfn)
if not res["OK"]:
log.debug("Error in __verifyWritePermission", res["Message"])
return res
if lfn not in res["Value"]["Successful"]:
errStr = "Write access not permitted for this credential."
log.debug(errStr, lfn)
failed[lfn] = errStr
else:
lfnsToRemove.add(lfn)
# Remove physical replicas first
res = self.__removePhysicalReplica(storageElementName, lfnsToRemove, replicaDict=replicaDict)
if not res["OK"]:
errStr = "Failed to remove physical replicas."
log.debug(errStr, res["Message"])
return res
failed.update(res["Value"]["Failed"])
# Here we use the FC PFN...
replicaTuples = [
(lfn, replicaDict[lfn][storageElementName], storageElementName) for lfn in res["Value"]["Successful"]
]
if replicaTuples:
res = self.__removeCatalogReplica(replicaTuples)
if not res["OK"]:
errStr = "Completely failed to remove physical files."
log.debug(errStr, res["Message"])
failed.update(dict.fromkeys((lfn for lfn, _pfn, _se in replicaTuples), res["Message"]))
successful = {}
else:
failed.update(res["Value"]["Failed"])
successful = res["Value"]["Successful"]
return S_OK({"Successful": successful, "Failed": failed})
def removeReplicaFromCatalog(self, storageElementName, lfn):
"""remove :lfn: replica from :storageElementName: SE
:param self: self reference
:param str storageElementName: SE name
:param mixed lfn: a single LFN or list of LFNs
"""
# FIXME: this method is dangerous and should eventually be removed as well
# as the script dirac-dms-remove-catalog-replicas
log = self.log.getSubLogger("removeReplicaFromCatalog")
# Remove the replica entries from the file catalogue: 'lfn' is the file (or
# list of files) to be removed, and 'storageElementName' is the storage
# element whose catalogue entry is removed
if isinstance(lfn, (list, dict, set, tuple)):
lfns = list(lfn)
else:
lfns = [lfn]
for lfn in lfns:
if not isinstance(lfn, six.string_types):
errStr = "Supplied lfns must be string or list of strings."
log.debug(errStr)
return S_ERROR(errStr)
successful = {}
failed = {}
if not lfns:
return S_OK({"Successful": successful, "Failed": failed})
log.debug("Will remove catalogue entry for %s lfns at %s." % (len(lfns), storageElementName))
res = self.fileCatalog.getReplicas(lfns, allStatus=True)
if not res["OK"]:
errStr = "Completely failed to get replicas for lfns."
log.debug(errStr, res["Message"])
return res
failed = {}
successful = {}
for lfn, reason in res["Value"]["Failed"].items(): # can be an iterator
if reason in ("No such file or directory", "File has zero replicas"):
successful[lfn] = True
else:
failed[lfn] = reason
replicaTuples = []
for lfn, repDict in res["Value"]["Successful"].items(): # can be an iterator
if storageElementName not in repDict:
# The file doesn't exist at the storage element, so we don't have to
# remove it
successful[lfn] = True
else:
replicaTuples.append((lfn, repDict[storageElementName], storageElementName))
log.debug("Resolved %s pfns for catalog removal at %s." % (len(replicaTuples), storageElementName))
res = self.__removeCatalogReplica(replicaTuples)
failed.update(res["Value"]["Failed"])
successful.update(res["Value"]["Successful"])
resDict = {"Successful": successful, "Failed": failed}
return S_OK(resDict)
def __removeCatalogReplica(self, replicaTuples):
"""remove replica form catalogue
:param replicaTuples : list of (lfn, catalogPFN, se)
"""
log = self.log.getSubLogger("__removeCatalogReplica")
startTime = Time.dateTime()
registrationStartTime = time.time()
# HACK!
replicaDict = {}
for lfn, pfn, se in replicaTuples:
replicaDict[lfn] = {"SE": se, "PFN": pfn}
res = self.fileCatalog.removeReplica(replicaDict)
endTime = Time.dateTime()
accountingDict = _initialiseAccountingDict("removeCatalogReplica", "", len(replicaTuples))
accountingDict["RegistrationTime"] = time.time() - registrationStartTime
if not res["OK"]:
accountingDict["RegistrationOK"] = 0
accountingDict["FinalStatus"] = "Failed"
self.dataOpSender.sendData(accountingDict, startTime=startTime, endTime=endTime)
errStr = "Completely failed to remove replica: "
log.debug(errStr, res["Message"])
return S_ERROR("%s %s" % (errStr, res["Message"]))
success = res["Value"]["Successful"]
failed = res["Value"]["Failed"]
for lfn, error in list(failed.items()):
# Ignore error if file doesn't exist
# This assumes all catalogs return an error as { catalog : error }
for catalog, err in list(error.items()):
if "no such file" in err.lower():
success.setdefault(lfn, {}).update({catalog: True})
error.pop(catalog)
if not failed[lfn]:
failed.pop(lfn)
else:
log.error("Failed to remove replica.", "%s %s" % (lfn, error))
# Only for logging information
if success:
log.debug("Removed %d replicas" % len(success))
for lfn in success:
log.debug("Successfully removed replica.", lfn)
accountingDict["RegistrationOK"] = len(success)
self.dataOpSender.sendData(accountingDict, startTime=startTime, endTime=endTime)
return res
def __removePhysicalReplica(self, storageElementName, lfnsToRemove, replicaDict=None):
"""remove replica from storage element
:param storageElementName : name of the storage Element
:param lfnsToRemove : set of lfn to removes
:param replicaDict : cache of fc.getReplicas, to be passed to the SE
"""
log = self.log.getSubLogger("__removePhysicalReplica")
log.debug("Attempting to remove %s pfns at %s." % (len(lfnsToRemove), storageElementName))
storageElement = StorageElement(storageElementName, vo=self.voName)
res = storageElement.isValid()
if not res["OK"]:
errStr = "The storage element is not currently valid."
log.verbose(errStr, "%s %s" % (storageElementName, res["Message"]))
return S_ERROR("%s %s" % (errStr, res["Message"]))
startTime = Time.dateTime()
transferStartTime = time.time()
lfnsToRemove = list(lfnsToRemove)
ret = storageElement.getFileSize(lfnsToRemove, replicaDict=replicaDict)
deletedSizes = ret.get("Value", {}).get("Successful", {})
res = storageElement.removeFile(lfnsToRemove, replicaDict=replicaDict)
endTime = Time.dateTime()
accountingDict = _initialiseAccountingDict("removePhysicalReplica", storageElementName, len(lfnsToRemove))
accountingDict["TransferTime"] = time.time() - transferStartTime
if not res["OK"]:
accountingDict["TransferOK"] = 0
accountingDict["FinalStatus"] = "Failed"
self.dataOpSender.sendData(accountingDict, startTime=startTime, endTime=endTime)
log.debug("Failed to remove replicas.", res["Message"])
else:
for lfn, value in list(res["Value"]["Failed"].items()):
if "No such file or directory" in value:
res["Value"]["Successful"][lfn] = lfn
res["Value"]["Failed"].pop(lfn)
for lfn in res["Value"]["Successful"]:
res["Value"]["Successful"][lfn] = True
deletedSize = sum(deletedSizes.get(lfn, 0) for lfn in res["Value"]["Successful"])
accountingDict["TransferSize"] = deletedSize
accountingDict["TransferOK"] = len(res["Value"]["Successful"])
self.dataOpSender.sendData(accountingDict, startTime=startTime, endTime=endTime)
infoStr = "Successfully issued accounting removal request."
log.debug(infoStr)
return res
#########################################################################
#
# File transfer methods
#
def put(self, lfn, fileName, diracSE, path=None):
"""Put a local file to a Storage Element
:param self: self reference
:param str lfn: LFN
:param str fileName: the full path to the local file
:param str diracSE: the Storage Element to which to put the file
:param str path: the path on the storage where the file will be put (if not provided the LFN will be used)
"""
log = self.log.getSubLogger("put")
# Check that the local file exists
if not os.path.exists(fileName):
errStr = "Supplied file does not exist."
log.debug(errStr, fileName)
return S_ERROR(errStr)
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname(lfn)
# Obtain the size of the local file
size = getSize(fileName)
if size == 0:
errStr = "Supplied file is zero size."
log.debug(errStr, fileName)
return S_ERROR(errStr)
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement(diracSE, vo=self.voName)
res = storageElement.isValid()
if not res["OK"]:
errStr = "The storage element is not currently valid."
log.verbose(errStr, "%s %s" % (diracSE, res["Message"]))
return S_ERROR("%s %s" % (errStr, res["Message"]))
fileDict = {lfn: fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
startTime = time.time()
res = returnSingleResult(storageElement.putFile(fileDict))
putTime = time.time() - startTime
if not res["OK"]:
errStr = "Failed to put file to Storage Element."
failed[lfn] = res["Message"]
log.debug(errStr, "%s: %s" % (fileName, res["Message"]))
else:
log.debug("Put file to storage in %s seconds." % putTime)
successful[lfn] = res["Value"]
resDict = {"Successful": successful, "Failed": failed}
return S_OK(resDict)
#########################################################################
#
# File catalog methods
#
def getActiveReplicas(self, lfns, getUrl=True, diskOnly=False, preferDisk=False):
"""Get all the replicas for the SEs which are in Active status for reading."""
return self.getReplicas(
lfns, allStatus=False, getUrl=getUrl, diskOnly=diskOnly, preferDisk=preferDisk, active=True
)
def __filterTapeReplicas(self, replicaDict, diskOnly=False):
"""
Check a replica dictionary for disk replicas:
If there is a disk replica, remove the tape replicas, else keep all.
The input argument is modified in place.
"""
seList = set(se for ses in replicaDict["Successful"].values() for se in ses) # can be an iterator
# Get a cache of SE statuses for long list of replicas
seStatus = dict(
(se, (self.__checkSEStatus(se, status="DiskSE"), self.__checkSEStatus(se, status="TapeSE")))
for se in seList
)
# Beware, there is a del below
for lfn, replicas in list(replicaDict["Successful"].items()):
self.__filterTapeSEs(replicas, diskOnly=diskOnly, seStatus=seStatus)
# If diskOnly, one may not have any replica in the end, set Failed
if diskOnly and not replicas:
del replicaDict["Successful"][lfn]
replicaDict["Failed"][lfn] = "No disk replicas"
return
def __filterReplicasForJobs(self, replicaDict):
"""Remove the SEs that are not to be used for jobs, and archive SEs if there are others
The input argument is modified
"""
seList = set(se for ses in replicaDict["Successful"].values() for se in ses) # can be an iterator
# Get a cache of SE statuses for long list of replicas
seStatus = dict((se, (self.dmsHelper.isSEForJobs(se), self.dmsHelper.isSEArchive(se))) for se in seList)
# Beware, there is a del below
for lfn, replicas in list(replicaDict["Successful"].items()):
otherThanArchive = set(se for se in replicas if not seStatus[se][1])
for se in list(replicas):
# Remove the SE if it should not be used for jobs or if it is an
# archive and there are other SEs
if not seStatus[se][0] or (otherThanArchive and seStatus[se][1]):
replicas.pop(se)
# If in the end there is no replica, set Failed
if not replicas:
del replicaDict["Successful"][lfn]
replicaDict["Failed"][lfn] = "No replicas for jobs"
return
def __filterTapeSEs(self, replicas, diskOnly=False, seStatus=None):
"""Remove the tape SEs as soon as there is one disk SE or diskOnly is requested
The input argument is modified
"""
# Build the SE status cache if not existing
if seStatus is None:
seStatus = dict(
(se, (self.__checkSEStatus(se, status="DiskSE"), self.__checkSEStatus(se, status="TapeSE")))
for se in replicas
)
for se in replicas: # There is a del below but we then return!
# First find a disk replica, otherwise do nothing unless diskOnly is set
if diskOnly or seStatus[se][0]:
# There is one disk replica, remove tape replicas and exit loop
for se in list(replicas): # Beware: there is a pop below
if seStatus[se][1]:
replicas.pop(se)
return
return
def checkActiveReplicas(self, replicaDict):
"""
Check a replica dictionary for active replicas, and verify input structure first
"""
if not isinstance(replicaDict, dict):
return S_ERROR("Wrong argument type %s, expected a dictionary" % type(replicaDict))
for key in ["Successful", "Failed"]:
if key not in replicaDict:
return S_ERROR('Missing key "%s" in replica dictionary' % key)
if not isinstance(replicaDict[key], dict):
return S_ERROR("Wrong argument type %s, expected a dictionary" % type(replicaDict[key]))
activeDict = {"Successful": {}, "Failed": replicaDict["Failed"].copy()}
for lfn, replicas in replicaDict["Successful"].items(): # can be an iterator
if not isinstance(replicas, dict):
activeDict["Failed"][lfn] = "Wrong replica info"
else:
activeDict["Successful"][lfn] = replicas.copy()
self.__filterActiveReplicas(activeDict)
return S_OK(activeDict)
def __filterActiveReplicas(self, replicaDict):
"""
Check a replica dictionary for active replicas
The input dict is modified, no returned value
"""
seList = set(se for ses in replicaDict["Successful"].values() for se in ses) # can be an iterator
# Get a cache of SE statuses for long list of replicas
seStatus = dict((se, self.__checkSEStatus(se, status="Read")) for se in seList)
for replicas in replicaDict["Successful"].values(): # can be an iterator
for se in list(replicas): # Beware: there is a pop below
if not seStatus[se]:
replicas.pop(se)
return
def __checkSEStatus(self, se, status="Read"):
"""returns the value of a certain SE status flag (access or other)"""
return StorageElement(se, vo=self.voName).status().get(status, False)
def getReplicas(self, lfns, allStatus=True, getUrl=True, diskOnly=False, preferDisk=False, active=False):
"""get replicas from catalogue and filter if requested
Warning: all filters are independent, hence active and preferDisk should both be set when selecting replicas for jobs
"""
catalogReplicas = {}
failed = {}
for lfnChunk in breakListIntoChunks(lfns, 1000):
res = self.fileCatalog.getReplicas(lfnChunk, allStatus=allStatus)
if res["OK"]:
catalogReplicas.update(res["Value"]["Successful"])
failed.update(res["Value"]["Failed"])
else:
return res
if not getUrl:
for lfn in catalogReplicas:
catalogReplicas[lfn] = dict.fromkeys(catalogReplicas[lfn], True)
elif not self.useCatalogPFN:
se_lfn = {}
# We group the getURL queries by storage element to gain speed
for lfn in catalogReplicas:
for se in catalogReplicas[lfn]:
se_lfn.setdefault(se, []).append(lfn)
for se in se_lfn:
seObj = StorageElement(se, vo=self.voName)
succPfn = (
seObj.getURL(se_lfn[se], protocol=self.registrationProtocol).get("Value", {}).get("Successful", {})
)
for lfn in succPfn:
catalogReplicas[lfn][se] = succPfn[lfn]
result = {"Successful": catalogReplicas, "Failed": failed}
if active:
self.__filterActiveReplicas(result)
if diskOnly or preferDisk:
self.__filterTapeReplicas(result, diskOnly=diskOnly)
return S_OK(result)
def getReplicasForJobs(self, lfns, allStatus=False, getUrl=True, diskOnly=False):
"""get replicas useful for jobs"""
# Call getReplicas with no filter and enforce filters in this method
result = self.getReplicas(lfns, allStatus=allStatus, getUrl=getUrl)
if not result["OK"]:
return result
replicaDict = result["Value"]
# For jobs replicas must be active
self.__filterActiveReplicas(replicaDict)
# For jobs, give preference to disk replicas but not only
self.__filterTapeReplicas(replicaDict, diskOnly=diskOnly)
# don't use SEs excluded for jobs (e.g. Failover)
self.__filterReplicasForJobs(replicaDict)
return S_OK(replicaDict)
###################################################################
#
# Methods from the catalogToStorage. These would all work with a direct call
# to the SE, but they first check that the replica is known to the catalogue
#
def __executeIfReplicaExists(self, storageElementName, lfn, method, **kwargs):
"""a simple wrapper that allows replica querying then perform the StorageElement operation
:param self: self reference
:param str storageElementName: DIRAC SE name
:param mixed lfn: a LFN str, list of LFNs or dict with LFNs as keys
"""
log = self.log.getSubLogger("__executeIfReplicaExists")
# # default value
kwargs = kwargs if kwargs else {}
# # get replicas for lfn
res = FileCatalog(vo=self.voName).getReplicas(lfn)
if not res["OK"]:
errStr = "Completely failed to get replicas for LFNs."
log.debug(errStr, res["Message"])
return res
# # returned dict, get failed replicas
retDict = {"Failed": res["Value"]["Failed"], "Successful": {}}
# # print errors
for lfn, reason in retDict["Failed"].items(): # can be an iterator
log.error("_callReplicaSEFcn: Failed to get replicas for file.", "%s %s" % (lfn, reason))
# # good replicas
lfnReplicas = res["Value"]["Successful"]
# # store PFN to LFN mapping
lfnList = []
for lfn, replicas in lfnReplicas.items(): # can be an iterator
if storageElementName in replicas:
lfnList.append(lfn)
else:
errStr = "File hasn't got replica at supplied Storage Element."
log.error(errStr, "%s %s" % (lfn, storageElementName))
retDict["Failed"][lfn] = errStr
if "replicaDict" not in kwargs:
kwargs["replicaDict"] = lfnReplicas
# # finally, call the StorageElement method
se = StorageElement(storageElementName, vo=self.voName)
fcn = getattr(se, method)
res = fcn(lfnList, **kwargs)
# # check result
if not res["OK"]:
errStr = "Failed to execute %s StorageElement method." % method
log.error(errStr, res["Message"])
return res
# # filter out failed and successful
retDict["Successful"].update(res["Value"]["Successful"])
retDict["Failed"].update(res["Value"]["Failed"])
return S_OK(retDict)
def getReplicaIsFile(self, lfn, storageElementName):
"""determine whether the supplied lfns are files at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "isFile")
def getReplicaSize(self, lfn, storageElementName):
"""get the size of files for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "getFileSize")
def getReplicaAccessUrl(self, lfn, storageElementName, protocol=False):
"""get the access url for lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "getURL", protocol=protocol)
def getReplicaMetadata(self, lfn, storageElementName):
"""get the file metadata for lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "getFileMetadata")
def prestageReplica(self, lfn, storageElementName, lifetime=86400):
"""issue a prestage requests for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: prestage lifetime in seconds (default 86400, i.e. 24 hours)
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "prestageFile", lifetime=lifetime)
def pinReplica(self, lfn, storageElementName, lifetime=86400):
"""pin the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: pin lifetime in seconds (default 86400, i.e. 24 hours)
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "pinFile", lifetime=lifetime)
def releaseReplica(self, lfn, storageElementName):
"""release pins for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "releaseFile")
def getReplica(self, lfn, storageElementName, localPath=False):
"""copy replicas from DIRAC SE to local directory
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param mixed localPath: path in the local file system; if False, os.getcwd() is used
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "getFile", localPath=localPath)
def __del__(self):
self.dataOpSender.concludeSending()
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/Client/DataManager.py
|
Python
|
gpl-3.0
| 83144
|
["DIRAC"] |
5091028ec5956665e26f225f5aff31b0fa04c50fb0c1802aee96c8fa3900ba1b
|
import json
from pyramid.renderers import render, Response
from src.sgd.frontend import config
from pyramid.view import notfound_view_config
from src.sgd.frontend.yeastgenome import send_message
from src.sgd.tools.blast import do_blast
from src.sgd.tools.patmatch import do_patmatch
from src.sgd.tools.seqtools import do_seq_analysis
from src.sgd.tools.gotools import do_gosearch
from src.sgd.tools.alignment import get_s3_data
from src.sgd.tools.restrictionmapper import do_restmap
def prep_views(chosen_frontend, config):
# some logic (NOT all) has been moved to views to be more 'pyramid-y'
config.scan('src.sgd.frontend.yeastgenome.views.misc_views')
config.scan('src.sgd.frontend.yeastgenome.views.locus_views')
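# Illustrative sketch of what config.scan() picks up (a hypothetical view, not
# part of this module): any callable decorated with @view_config in the
# scanned package is registered automatically, e.g.
#   @view_config(route_name='healthcheck', renderer='json')
#   def healthcheck(request):
#       return {'status': 'ok'}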
# misc pages from misc_views
config.add_route('healthcheck', '/healthcheck')
config.add_route('redirect_no_overview', '/{ignore}/overview')
config.add_route('redirect_no_overview_long', '/{ignore_a}/{ignore_b}/overview')
config.add_route('home', '/')
config.add_route('blast_fungal', '/blast-fungal')
config.add_route('blast_sgd', '/blast-sgd')
config.add_route('patmatch', '/nph-patmatch')
config.add_route('restrictionmapper', '/restrictionMapper')
config.add_route('seq_tools', '/seqTools')
config.add_route('gotermfinder', '/goTermFinder')
config.add_route('goslimmapper', '/goSlimMapper')
config.add_route('strain_alignment', '/strainAlignment')
config.add_route('complex', '/complex/{identifier}')
config.add_route('complex_literature_details', '/complex/{identifier}/literature')
config.add_route('complex_go_details', '/complex/{identifier}/go')
config.add_route('allele', '/allele/{identifier}')
config.add_route('allele_literature_details', '/allele/{identifier}/literature')
config.add_route('blog_post', '/blog/{slug}')
config.add_route('blog_index', '/blog')
config.add_route('blog_archive', '/blog/archive/{year}')
config.add_route('blog_category', '/blog/category/{category}')
config.add_route('blog_tag', '/blog/tag/{tag}')
config.add_route('colleague_show', '/colleague/{identifier}')
config.add_route('downloads', '/downloads')
config.add_route('api_portal', '/api')
config.add_route('api_doc', '/api/doc')
config.add_route('interaction_search', '/interaction-search')
config.add_route('download_list', '/download-list')
config.add_route('snapshot', '/genomesnapshot')
config.add_route('style_guide', '/style-guide')
config.add_route('suggestion', '/suggestion')
config.add_route('variant_viewer', '/variant-viewer')
config.add_route('search', '/search')
config.add_route('primer3', '/primer3')
# config.add_route('example', '/example')
# locus pages from locus_views
config.add_route('locus', '/locus/{identifier}')
config.add_route('sequence_details', '/locus/{identifier}/sequence')
config.add_route('protein_details', '/locus/{identifier}/protein')
config.add_route('go_details', '/locus/{identifier}/go')
config.add_route('disease_details', '/locus/{identifier}/disease')
config.add_route('phenotype_details', '/locus/{identifier}/phenotype')
config.add_route('interaction_details', '/locus/{identifier}/interaction')
config.add_route('regulation_details', '/locus/{identifier}/regulation')
config.add_route('expression_details', '/locus/{identifier}/expression')
config.add_route('literature_details', '/locus/{identifier}/literature')
config.add_route('homology_details', '/locus/{identifier}/homology')
config.add_route('curator_sequence', '/curator/locus/{identifier}/sequence')
# references
config.add_route('references_this_week', '/reference/recent')
config.add_route('reference', '/reference/{identifier}')
config.add_route('phenotype', '/phenotype/{identifier}')
# public CI
config.add_route('new_gene_name_reservation', 'reserved_name/new')
config.add_route('new_colleague', 'colleague_update')
config.add_route('submit_data', '/submitData')
config.add_route('author', '/author/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('author', request)(getattr(chosen_frontend, 'author')(request.matchdict['identifier'])),
renderer=chosen_frontend.get_renderer('author'),
route_name='author')
config.add_route('strain', '/strain/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('strain', request)(getattr(chosen_frontend, 'strain')(request.matchdict['identifier'])),
renderer=chosen_frontend.get_renderer('strain'),
route_name='strain')
config.add_route('redirect', '/redirect/{page}')
config.add_view(lambda request: getattr(chosen_frontend, 'redirect')(page=request.matchdict['page'], params=request.GET),
renderer=chosen_frontend.get_renderer('redirect'),
route_name='redirect')
config.add_route('header', '/header')
config.add_view(lambda request: {'header': render('static/templates/header.jinja2', {})},
renderer=chosen_frontend.get_renderer('header'),
route_name='header')
config.add_route('footer', '/footer')
config.add_view(lambda request: {'footer': render('static/templates/footer.jinja2', {})},
renderer=chosen_frontend.get_renderer('footer'),
route_name='footer')
config.add_route('download_table', '/download_table')
config.add_view(lambda request: chosen_frontend.response_wrapper('download_table', request)(
getattr(chosen_frontend, 'download_table')(
response=request.response,
header_info=None if 'headers' not in request.POST else json.loads(request.POST['headers']),
data=None if 'data' not in request.POST else json.loads(request.POST['data']),
display_name=None if 'display_name' not in request.POST else request.POST['display_name'])),
renderer=chosen_frontend.get_renderer('download_table'),
route_name='download_table')
config.add_route('download_image', '/download_image')
config.add_view(lambda request: chosen_frontend.response_wrapper('download_image', request)(
getattr(chosen_frontend, 'download_image')(
            response=request.response,
            data=None if 'data' not in request.POST else request.POST['data'],
            display_name=None if 'display_name' not in request.POST else request.POST['display_name'])),
renderer=chosen_frontend.get_renderer('download_image'),
route_name='download_image')
config.add_route('download_citations', '/download_citations')
config.add_view(lambda request: chosen_frontend.response_wrapper('download_citations', request)(
getattr(chosen_frontend, 'download_citations')(
            response=request.response,
            reference_ids=[] if 'reference_ids' not in request.POST else request.POST['reference_ids'].split(','),
            display_name=None if 'display_name' not in request.POST else request.POST['display_name'])),
renderer=chosen_frontend.get_renderer('download_citations'),
route_name='download_citations')
config.add_route('download_sequence', '/download_sequence')
config.add_view(lambda request: chosen_frontend.response_wrapper('download_sequence', request)(
getattr(chosen_frontend, 'download_sequence')(
            response=request.response,
            sequence=None if 'sequence' not in request.POST else request.POST['sequence'],
            header=None if 'header' not in request.POST else request.POST['header'],
            filename=None if 'filename' not in request.POST else request.POST['filename'])),
renderer=chosen_frontend.get_renderer('download_sequence'),
route_name='download_sequence')
config.add_route('analyze', '/analyze')
config.add_view(lambda request: chosen_frontend.response_wrapper('analyze', request)(
getattr(chosen_frontend, 'analyze')(
            bioent_ids=None if 'bioent_ids' not in request.POST else json.loads(request.POST['bioent_ids']),
            list_name=None if 'list_name' not in request.POST else request.POST['list_name'])),
renderer=chosen_frontend.get_renderer('analyze'),
route_name='analyze')
config.add_route('enrichment', '/enrichment')
config.add_view(lambda request: chosen_frontend.response_wrapper('enrichment', request)(
getattr(chosen_frontend, 'enrichment')(
            bioent_ids=None if 'bioent_ids' not in request.json_body else request.json_body['bioent_ids'])),
renderer=chosen_frontend.get_renderer('enrichment'),
route_name='enrichment')
    # the root observable of the ontology (APO:0000017) must be redirected to the YPO page
config.add_route('phenotype_ontology_apo', '/observable/APO:0000017')
config.add_view(lambda request: chosen_frontend.response_wrapper('phenotype_ontology', request)(getattr(chosen_frontend, 'phenotype_ontology')()),
renderer=chosen_frontend.get_renderer('phenotype_ontology'),
route_name='phenotype_ontology_apo')
config.add_route('observable', '/observable/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('observable', request)(getattr(chosen_frontend, 'observable')(biocon_repr=request.matchdict['identifier'].lower())),
renderer=chosen_frontend.get_renderer('observable'),
route_name='observable')
config.add_route('phenotype_ontology', '/ontology/phenotype/ypo')
config.add_view(lambda request: chosen_frontend.response_wrapper('phenotype_ontology', request)(getattr(chosen_frontend, 'phenotype_ontology')()),
renderer=chosen_frontend.get_renderer('phenotype_ontology'),
route_name='phenotype_ontology')
config.add_route('ecnumber', '/ecnumber/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('ecnumber', request)(getattr(chosen_frontend, 'ecnumber')(biocon_repr=request.matchdict['identifier'].lower())),
renderer=chosen_frontend.get_renderer('ecnumber'),
route_name='ecnumber')
config.add_route('go', '/go/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('go', request)(getattr(chosen_frontend, 'go')(biocon_repr=request.matchdict['identifier'].lower())),
renderer=chosen_frontend.get_renderer('go'),
route_name='go')
config.add_route('go_ontology', '/ontology/go/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('go_ontology', request)(getattr(chosen_frontend, 'go_ontology')(biocon_repr=request.matchdict['identifier'].lower())),
renderer=chosen_frontend.get_renderer('go_ontology'),
route_name='go_ontology')
config.add_route('disease', '/disease/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('disease', request)(getattr(chosen_frontend, 'disease')(biocon_repr=request.matchdict['identifier'].lower())),
renderer=chosen_frontend.get_renderer('disease'),
route_name='disease')
config.add_route('disease_ontology', '/ontology/disease/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('disease_ontology', request)(getattr(chosen_frontend, 'disease_ontology')(biocon_repr=request.matchdict['identifier'].lower())),
renderer=chosen_frontend.get_renderer('disease_ontology'),
route_name='disease_ontology')
config.add_route('chemical', '/chemical/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('chemical', request)(getattr(chosen_frontend, 'chemical')(chemical_repr=request.matchdict['identifier'].lower())),
renderer=chosen_frontend.get_renderer('chemical'),
route_name='chemical')
config.add_route('domain', '/domain/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('domain', request)(getattr(chosen_frontend, 'domain')(domain_repr=request.matchdict['identifier'].lower())),
renderer=chosen_frontend.get_renderer('domain'),
route_name='domain')
config.add_route('reserved_name', '/reservedname/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('reserved_name', request)(getattr(chosen_frontend, 'reserved_name')(reserved_name_repr=request.matchdict['identifier'].lower())),
renderer=chosen_frontend.get_renderer('reserved_name'),
route_name='reserved_name')
config.add_route('dataset', '/dataset/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('dataset', request)(getattr(chosen_frontend, 'dataset')(bioitem_repr=request.matchdict['identifier'].lower())),
renderer=chosen_frontend.get_renderer('dataset'),
route_name='dataset')
config.add_route('contig', '/contig/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('contig', request)(getattr(chosen_frontend, 'contig')(contig_repr=request.matchdict['identifier'].lower())),
renderer=chosen_frontend.get_renderer('contig'),
route_name='contig')
config.add_route('keyword', '/keyword/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('keyword', request)(getattr(chosen_frontend, 'keyword')(keyword_repr=request.matchdict['identifier'].lower())),
renderer=chosen_frontend.get_renderer('keyword'),
route_name='keyword')
config.add_route('locus_list', '/locus/{list_name}')
config.add_view(lambda request: chosen_frontend.response_wrapper('locus_list', request)(getattr(chosen_frontend, 'locus_list')(list_name=request.matchdict['list_name'])),
renderer=chosen_frontend.get_renderer('locus_list'),
route_name='locus_list')
config.add_route('experiment', '/experiment/{identifier}')
config.add_view(lambda request: chosen_frontend.response_wrapper('experiment', request)(getattr(chosen_frontend, 'experiment')(experiment_repr=request.matchdict['identifier'].lower())),
renderer=chosen_frontend.get_renderer('experiment'),
route_name='experiment')
config.add_route('backend', '/backend/*url')
config.add_view(lambda request: chosen_frontend.response_wrapper('backend', request)(getattr(chosen_frontend, 'backend')(url_repr=request.matchdict['url'], args=request.GET, request=request)),
renderer='json',
route_name='backend')
config.add_route('send_email', '/send_data')
config.add_view(send_message, route_name='send_email')
config.add_route('search_sequence_objects', '/search_sequence_objects')
config.add_view(lambda request: chosen_frontend.response_wrapper('search_sequence_objects', request)(getattr(chosen_frontend, 'search_sequence_objects')(params=request.GET)),
renderer=chosen_frontend.get_renderer('search_sequence_objects'),
route_name='search_sequence_objects')
config.add_route('get_sequence_object', '/get_sequence_object/{id}')
config.add_view(lambda request: chosen_frontend.response_wrapper('get_sequence_object', request)(getattr(chosen_frontend, 'get_sequence_object')(locus_repr=request.matchdict['id'].lower())),
renderer=chosen_frontend.get_renderer('get_sequence_object'),
route_name='get_sequence_object')
config.add_route('do_blast', '/run_blast')
config.add_view(do_blast, route_name='do_blast')
config.add_route('do_patmatch', '/run_patmatch')
config.add_view(do_patmatch, route_name='do_patmatch')
config.add_route('do_restmap', '/run_restmapper')
config.add_view(do_restmap, route_name='do_restmap')
config.add_route('do_seq_analysis', '/run_seqtools')
config.add_view(do_seq_analysis, route_name='do_seq_analysis')
config.add_route('do_gosearch', '/run_gotools')
config.add_view(do_gosearch, route_name='do_gosearch')
config.add_route('get_s3_data', '/get_alignment')
config.add_view(get_s3_data, route_name='get_s3_data')
def prepare_frontend(frontend_type, **configs):
if frontend_type == 'yeastgenome':
from src.sgd.frontend.yeastgenome import yeastgenome_frontend
chosen_frontend, configuration = yeastgenome_frontend(config.backend_url, config.heritage_url, config.log_directory, **configs)
prep_views(chosen_frontend, configuration)
return configuration
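# A minimal sketch (hypothetical names) of the registration pattern used
# throughout prep_views above: each page is an add_route/add_view pair in
# which the view is a lambda that unpacks request.matchdict and delegates to
# a frontend method, wrapped by response_wrapper:
#
#   config.add_route('thing', '/thing/{identifier}')
#   config.add_view(
#       lambda request: chosen_frontend.response_wrapper('thing', request)(
#           chosen_frontend.thing(request.matchdict['identifier'])),
#       renderer=chosen_frontend.get_renderer('thing'),
#       route_name='thing')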
| yeastgenome/SGDFrontend | src/sgd/frontend/__init__.py | Python | mit | 17,738 | ["BLAST"] | 04b383b21367abd1697ba51a4f2f56d1dca566d6be43d585a5b0f22c6fd3f912 |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
import os.path
import pytest
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from zoo.orca import OrcaContext
import zoo.orca.data.pandas
from zoo.orca.learn.mxnet import Estimator, create_config
def prepare_data_symbol(df):
data = {'input': np.array(df['data'].values.tolist())}
label = {'label': df['label'].values}
return {'x': data, 'y': label}
def prepare_data_gluon(df):
data = np.array(df['data'].values.tolist())
label = df['label'].values
return {'x': data, 'y': label}
def get_loss(config):
return gluon.loss.SoftmaxCrossEntropyLoss()
def get_gluon_metrics(config):
return mx.metric.Accuracy()
def get_metrics(config):
return 'accuracy'
def get_symbol_model(config):
input_data = mx.symbol.Variable('input')
y_true = mx.symbol.Variable('label')
fc1 = mx.symbol.FullyConnected(data=input_data, num_hidden=20, name='fc1')
fc2 = mx.symbol.FullyConnected(data=fc1, num_hidden=10, name='fc2')
output = mx.symbol.SoftmaxOutput(data=fc2, label=y_true, name='output')
mod = mx.mod.Module(symbol=output,
data_names=['input'],
label_names=['label'],
context=mx.cpu())
return mod
def get_gluon_model(config):
class SimpleModel(gluon.Block):
def __init__(self, **kwargs):
super(SimpleModel, self).__init__(**kwargs)
self.fc1 = nn.Dense(20)
self.fc2 = nn.Dense(10)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
net = SimpleModel()
net.initialize(mx.init.Xavier(magnitude=2.24), ctx=[mx.cpu()])
return net
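# Note on the creator-function contract exercised below (inferred from these
# tests, not from separate documentation): Estimator.from_mxnet takes callables
# that each receive the config dict and return, respectively, a model (a symbol
# Module or a gluon Block), a loss, and evaluation/validation metrics; each of
# the num_workers workers invokes them to build its own replica.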
class TestMXNetSparkXShards(TestCase):
def setup_method(self, method):
self.resource_path = os.path.join(os.path.split(__file__)[0], "../../resources")
OrcaContext.pandas_read_backend = "pandas"
def tearDown(self):
OrcaContext.pandas_read_backend = "spark"
def test_xshards_symbol_with_val(self):
resource_path = os.path.join(os.path.split(__file__)[0], "../../../../resources")
train_file_path = os.path.join(resource_path, "orca/learn/single_input_json/train")
train_data_shard = zoo.orca.data.pandas.read_json(
train_file_path, orient='records', lines=False).transform_shard(prepare_data_symbol)
test_file_path = os.path.join(resource_path, "orca/learn/single_input_json/test")
test_data_shard = zoo.orca.data.pandas.read_json(
test_file_path, orient='records', lines=False).transform_shard(prepare_data_symbol)
config = create_config(log_interval=1, seed=42)
estimator = Estimator.from_mxnet(config=config, model_creator=get_symbol_model,
validation_metrics_creator=get_metrics,
eval_metrics_creator=get_metrics, num_workers=2)
estimator.fit(train_data_shard, epochs=2)
train_data_shard2 = zoo.orca.data.pandas.read_json(
train_file_path, orient='records', lines=False).transform_shard(prepare_data_symbol)
estimator.fit(train_data_shard2, validation_data=test_data_shard, epochs=1, batch_size=32)
estimator.shutdown()
def test_xshards_symbol_without_val(self):
resource_path = os.path.join(os.path.split(__file__)[0], "../../../../resources")
train_file_path = os.path.join(resource_path, "orca/learn/single_input_json/train")
train_data_shard = zoo.orca.data.pandas.read_json(
train_file_path, orient='records', lines=False).transform_shard(prepare_data_symbol)
config = create_config(log_interval=1, seed=42)
estimator = Estimator.from_mxnet(config=config, model_creator=get_symbol_model,
eval_metrics_creator=get_metrics, num_workers=2)
estimator.fit(train_data_shard, epochs=2, batch_size=16)
estimator.shutdown()
def test_xshards_gluon(self):
resource_path = os.path.join(os.path.split(__file__)[0], "../../../../resources")
train_file_path = os.path.join(resource_path, "orca/learn/single_input_json/train")
train_data_shard = zoo.orca.data.pandas.read_json(
train_file_path, orient='records', lines=False).transform_shard(prepare_data_gluon)
test_file_path = os.path.join(resource_path, "orca/learn/single_input_json/train")
test_data_shard = zoo.orca.data.pandas.read_json(
test_file_path, orient='records', lines=False).transform_shard(prepare_data_gluon)
config = create_config(log_interval=1, seed=42)
estimator = Estimator.from_mxnet(config=config, model_creator=get_gluon_model,
loss_creator=get_loss,
validation_metrics_creator=get_gluon_metrics,
eval_metrics_creator=get_gluon_metrics,
num_workers=2)
estimator.fit(train_data_shard, validation_data=test_data_shard, epochs=2, batch_size=8)
estimator.shutdown()
if __name__ == "__main__":
pytest.main([__file__])
| intel-analytics/analytics-zoo | pyzoo/test/zoo/orca/learn/ray/mxnet/test_mxnet_spark_xshards.py | Python | apache-2.0 | 5,851 | ["ORCA"] | ec158da10a8604ac43dba324ab2709ebc8db1230dd30680dc8e4d2c4852285e6 |
###
# Copyright 2008-2011 Diamond Light Source Ltd.
# This file is part of Diffcalc.
#
# Diffcalc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diffcalc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diffcalc. If not, see <http://www.gnu.org/licenses/>.
###
# TODO: class largely copied from test_calc
from math import pi
from mock import Mock
from nose.plugins.skip import SkipTest
from diffcalc import settings
try:
from numpy import matrix
except ImportError:
from numjy import matrix
from diffcalc.hkl.you.geometry import SixCircle
from diffcalc.hkl.willmott.calc import \
WillmottHorizontalPosition as WillPos
from diffcalc.hkl.you.geometry import YouPosition as YouPos
from diffcalc.hkl.you.calc import YouUbCalcStrategy
from test.tools import matrixeq_
from diffcalc.ub.calc import UBCalculation
from diffcalc.ub.crystal import CrystalUnderTest
from diffcalc.ub.persistence import UbCalculationNonPersister
from test.diffcalc.hkl.you.test_calc import _BaseTest
from diffcalc.settings import NUNAME
TORAD = pi / 180
TODEG = 180 / pi
I = matrix('1 0 0; 0 1 0; 0 0 1')
class SkipTestSurfaceNormalVerticalCubic(_BaseTest):
def setup_method(self):
_BaseTest.setup_method(self)
self.constraints._constrained = {'a_eq_b': None, 'mu': -pi / 2,
'eta': 0}
self.wavelength = 1
self.UB = I * 2 * pi
def _configure_ub(self):
self.mock_ubcalc.UB = self.UB
def _check(self, hkl, pos, virtual_expected={}, fails=False):
if pos is not None:
self._check_angles_to_hkl('', 999, 999, hkl, pos, self.wavelength,
virtual_expected)
if fails:
self._check_hkl_to_angles_fails('', 999, 999, hkl, pos,
self.wavelength, virtual_expected)
else:
self._check_hkl_to_angles('', 999, 999, hkl, pos, self.wavelength,
virtual_expected)
def testHkl001(self):
pos = YouPos(mu=-90, delta=60, nu=0, eta=0, chi=90 + 30, phi=-90, unit='DEG')
self._check((0, 0, 1), pos, {'alpha': 30, 'beta': 30})
def testHkl011(self):
# raise SkipTest()
# skipped because we can't calculate values to check against by hand
pos = YouPos(mu=-90, delta=90, nu=0, eta=0, chi=90 + 90, phi=-90, unit='DEG')
self._check((0, 1, 1), pos, {'alpha': 45, 'beta': 45})
def testHkl010fails(self):
self._check((0, 1, 0),
None,
{'alpha': 30, 'beta': 30}, fails=True)
def testHkl100fails(self):
self._check((1, 0, 0),
None,
{'alpha': 30, 'beta': 30}, fails=True)
def testHkl111(self):
raise SkipTest()
# skipped because we can't calculate values to check against by hand
pos = YouPos(mu=-90, delta=90, nu=0, eta=0, chi=90 + 90, phi=-90, unit='DEG')
self._check((1, 1, 1), pos, {'alpha': 45, 'beta': 45})
# Primary and secondary reflections found with the help of DDIF on Diamond's
# i07 on Jan 27 2010
HKL0 = 2, 19, 32
REF0 = WillPos(delta=21.975, gamma=4.419, omegah=2, phi=326.2)
HKL1 = 0, 7, 22
REF1 = WillPos(delta=11.292, gamma=2.844, omegah=2, phi=124.1)
WAVELENGTH = 0.6358
ENERGY = 12.39842 / WAVELENGTH
# This is the version that Diffcalc comes up with (see following test)
U_DIFFCALC = matrix([[-0.7178876, 0.6643924, -0.2078944],
[-0.6559596, -0.5455572, 0.5216170],
[0.2331402, 0.5108327, 0.8274634]])
#class WillmottHorizontalGeometry(VliegGeometry):
#
# def __init__(self):
# VliegGeometry.__init__(self,
# name='willmott_horizontal',
# supported_mode_groups=[],
# fixed_parameters={},
# gamma_location='base'
# )
#
# def physical_angles_to_internal_position(self, physicalAngles):
# assert (len(physicalAngles) == 4), "Wrong length of input list"
# return WillPos(*physicalAngles)
#
# def internal_position_to_physical_angles(self, internalPosition):
# return internalPosition.totuple()
def willmott_to_you_fixed_mu_eta(pos):
pos = YouPos(mu=-90,
delta=pos.delta,
nu=pos.gamma,
eta=0,
chi=90 + pos.omegah,
phi=-90 - pos.phi,
unit='DEG')
if pos.phi > 180:
pos.phi -= 360
elif pos.phi < -180:
pos.phi += 360
return pos
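# The conversion above maps Willmott's horizontal geometry onto the You
# six-circle convention with mu and eta fixed: delta carries over, gamma maps
# to nu, omegah tilts chi away from 90 degrees, and phi is negated and offset
# by -90 degrees before being wrapped into [-180, 180].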
class TestUBCalculationWithWillmotStrategy_Si_5_5_12_FixedMuEta():
def setup_method(self):
hardware = Mock()
hardware.get_axes_names.return_value = ('m', 'd', 'n', 'e', 'c',
'p')
settings.hardware = hardware
settings.geometry = SixCircle()
self.ubcalc = UBCalculation(UbCalculationNonPersister(),
YouUbCalcStrategy())
def testAgainstResultsFromJan_27_2010(self):
self.ubcalc.start_new('test')
self.ubcalc.set_lattice('Si_5_5_12', 7.68, 53.48, 75.63, 90, 90, 90)
self.ubcalc.add_reflection(
HKL0[0], HKL0[1], HKL0[2], willmott_to_you_fixed_mu_eta(REF0),
ENERGY, 'ref0', None)
self.ubcalc.add_reflection(
HKL1[0], HKL1[1], HKL1[2], willmott_to_you_fixed_mu_eta(REF1),
ENERGY, 'ref1', None)
self.ubcalc.calculate_UB()
print "U: ", self.ubcalc.U
print "UB: ", self.ubcalc.UB
matrixeq_(self.ubcalc.U, U_DIFFCALC)
class TestFixedMuEta(_BaseTest):
def setup_method(self):
_BaseTest.setup_method(self)
self._configure_constraints()
self.wavelength = 0.6358
B = CrystalUnderTest('xtal', 7.68, 53.48,
75.63, 90, 90, 90).B
self.UB = U_DIFFCALC * B
self._configure_limits()
def _configure_constraints(self):
self.constraints._constrained = {'alpha': 2 * TORAD, 'mu': -pi / 2,
'eta': 0}
def _configure_limits(self):
self.mock_hardware.set_lower_limit(NUNAME, None)
self.mock_hardware.set_upper_limit('delta', 90)
self.mock_hardware.set_lower_limit('mu', None)
self.mock_hardware.set_lower_limit('eta', None)
self.mock_hardware.set_lower_limit('chi', None)
def _convert_willmott_pos(self, willmott_pos):
return willmott_to_you_fixed_mu_eta(willmott_pos)
def _configure_ub(self):
self.mock_ubcalc.UB = self.UB
def _check(self, hkl, pos, virtual_expected={}, fails=False):
self._check_angles_to_hkl('', 999, 999, hkl, pos, self.wavelength,
virtual_expected)
if fails:
self._check_hkl_to_angles_fails('', 999, 999, hkl, pos,
self.wavelength, virtual_expected)
else:
self._check_hkl_to_angles('', 999, 999, hkl, pos, self.wavelength,
virtual_expected)
def testHkl_2_19_32_found_orientation_setting(self):
'''Check that the or0 reflection maps back to the assumed hkl'''
self.places = 2
self._check_angles_to_hkl('', 999, 999, HKL0,
self._convert_willmott_pos(REF0),
self.wavelength, {'alpha': 2})
def testHkl_0_7_22_found_orientation_setting(self):
'''Check that the or1 reflection maps back to the assumed hkl'''
self.places = 0
self._check_angles_to_hkl('', 999, 999, HKL1,
self._convert_willmott_pos(REF1),
self.wavelength, {'alpha': 2})
def testHkl_2_19_32_calculated_from_DDIF(self):
self.places = 3
willpos = WillPos(delta=21.974, gamma=4.419, omegah=2, phi=-33.803)
self._check((2, 19, 32),
self._convert_willmott_pos(willpos),
{'alpha': 2})
def testHkl_0_7_22_calculated_from_DDIF(self):
self.places = 3
willpos = WillPos(delta=11.241801854649, gamma=-3.038407637123,
omegah=2, phi=-86.56344250267)
self._check((0, 7, 22),
self._convert_willmott_pos(willpos),
{'alpha': 2})
def testHkl_2_m5_12_calculated_from_DDIF(self):
self.places = 3
willpos = WillPos(delta=5.224, gamma=10.415, omegah=2, phi=-1.972)
self._check((2, -5, 12),
self._convert_willmott_pos(willpos),
{'alpha': 2})
def testHkl_2_19_32_calculated_predicted_with_diffcalc_and_found(self):
willpos = WillPos(delta=21.974032376045, gamma=4.418955754003,
omegah=2, phi=-33.80254)
self._check((2, 19, 32),
self._convert_willmott_pos(willpos),
{'alpha': 2})
def testHkl_0_7_22_calculated_predicted_with_diffcalc_and_found(self):
willpos = WillPos(delta=11.241801854649, gamma=-3.038407637123,
omegah=2, phi=-86.563442502670)
self._check((0, 7, 22),
self._convert_willmott_pos(willpos),
{'alpha': 2})
def testHkl_2_m5_12_calculated_predicted_with_diffcalc_and_found(self):
willpos = WillPos(delta=5.223972025344, gamma=10.415435905622,
omegah=2, phi=-90 + 88.02751)
self._check((2, -5, 12),
self._convert_willmott_pos(willpos),
{'alpha': 2})
###############################################################################
def willmott_to_you_fixed_mu_chi(pos):
pos = YouPos(mu=-0,
delta=pos.delta,
nu=pos.gamma,
eta=pos.omegah,
chi=90,
phi=-pos.phi,
unit='DEG')
if pos.phi > 180:
pos.phi -= 360
elif pos.phi < -180:
pos.phi += 360
return pos
class TestUBCalculationWithWillmotStrategy_Si_5_5_12_FixedMuChi():
def setup_method(self):
hardware = Mock()
names = 'm', 'd', 'n', 'e', 'c', 'p'
hardware.get_axes_names.return_value = names
settings.hardware = hardware
settings.geometry = SixCircle()
self.ubcalc = UBCalculation(UbCalculationNonPersister(),
YouUbCalcStrategy())
def testAgainstResultsFromJan_27_2010(self):
self.ubcalc.start_new('test')
self.ubcalc.set_lattice('Si_5_5_12', 7.68, 53.48, 75.63, 90, 90, 90)
self.ubcalc.add_reflection(
HKL0[0], HKL0[1], HKL0[2], willmott_to_you_fixed_mu_chi(REF0),
ENERGY, 'ref0', None)
self.ubcalc.add_reflection(
HKL1[0], HKL1[1], HKL1[2], willmott_to_you_fixed_mu_chi(REF1),
ENERGY, 'ref1', None)
self.ubcalc.calculate_UB()
print "U: ", self.ubcalc.U
print "UB: ", self.ubcalc.UB
matrixeq_(self.ubcalc.U, U_DIFFCALC)
class Test_Fixed_Mu_Chi(TestFixedMuEta):
def _configure_constraints(self):
self.constraints._constrained = {'alpha': 2 * TORAD, 'mu': 0,
'chi': pi / 2}
def _convert_willmott_pos(self, willmott_pos):
return willmott_to_you_fixed_mu_chi(willmott_pos)
def willmott_to_you_fixed_eta_chi(pos):
pos = YouPos(mu=pos.omegah,
delta=-pos.gamma,
nu=pos.delta,
eta=0,
chi=0,
phi=-pos.phi,
unit='DEG')
if pos.phi > 180:
pos.phi -= 360
elif pos.phi < -180:
pos.phi += 360
return pos
class Test_Fixed_Eta_Chi(TestFixedMuEta):
def _configure_constraints(self):
self.constraints._constrained = {'alpha': 2 * TORAD, 'eta': 0,
'chi': 0}
def _convert_willmott_pos(self, willmott_pos):
return willmott_to_you_fixed_eta_chi(willmott_pos)
    def testHkl_2_19_32_found_orientation_setting(self):
        raise SkipTest()
    def testHkl_0_7_22_found_orientation_setting(self):
        raise SkipTest()
    def testHkl_2_19_32_calculated_from_DDIF(self):
        raise SkipTest()
    def testHkl_0_7_22_calculated_from_DDIF(self):
        raise SkipTest()
    def testHkl_2_m5_12_calculated_from_DDIF(self):
        raise SkipTest()
def testHkl_2_19_32_calculated_predicted_with_diffcalc_and_found(self):
        willpos = WillPos(delta=22.0332862, gamma=-4.0973643,
                          omegah=2, phi=64.0273584)
self._check((2, 19, 32),
self._convert_willmott_pos(willpos),
{'alpha': 2})
def testHkl_0_7_22_calculated_predicted_with_diffcalc_and_found(self):
willpos = WillPos(delta=11.2572236, gamma=-2.9800571,
omegah=2, phi=-86.5634425)
self._check((0, 7, 22),
self._convert_willmott_pos(willpos),
{'alpha': 2})
def testHkl_2_m5_12_calculated_predicted_with_diffcalc_and_found(self):
willpos = WillPos(delta=5.3109941, gamma=-10.3716944,
omegah=2, phi=167.0041454)
self._check((2, -5, 12),
self._convert_willmott_pos(willpos),
{'alpha': 2})
# Primary and secondary reflections found with the help of DDIF on Diamond's
# i07 on Jan 28/29 2010
Pt531_HKL0 = -1.000, 1.000, 6.0000
Pt531_REF0 = WillPos(delta=9.3971025, gamma=16.1812303, omegah=2,
phi=-52.1392905)
Pt531_HKL1 = -2.000, -1.000, 7.0000
Pt531_REF1 = WillPos(delta=11.0126958, gamma=-11.8636128, omegah=2,
phi=40.3803393)
Pt531_REF12 = WillPos(delta=11.0126958, gamma=11.8636128, omegah=2,
phi=-121.2155975)
Pt531_HKL2 = 1, 1, 9
Pt531_REF2 = WillPos(delta=14.1881617, gamma=7.7585939, omegah=2,
phi=23.0203132)
Pt531_REF22 = WillPos(delta=14.1881617, gamma=-7.7585939, omegah=2,
phi=-183.465146)
Pt531_WAVELENGTH = 0.6358
# This is the U matrix displayed by DDIF
U_FROM_DDIF = matrix([[-0.00312594, -0.00063417, 0.99999491],
[0.99999229, -0.00237817, 0.00312443],
[0.00237618, 0.99999697, 0.00064159]])
# This is the version that Diffcalc comes up with (see following test)
Pt531_U_DIFFCALC = matrix([[-0.0023763, -0.9999970, -0.0006416],
[0.9999923, -0.0023783, 0.0031244],
[-0.0031259, -0.0006342, 0.9999949]])
class TestUBCalculationWithYouStrategy_Pt531_FixedMuChi():
def setup_method(self):
hardware = Mock()
names = 'm', 'd', 'n', 'e', 'c', 'p'
hardware.get_axes_names.return_value = names
settings.hardware = hardware
settings.geometry = SixCircle()
self.ubcalc = UBCalculation(UbCalculationNonPersister(),
YouUbCalcStrategy())
def testAgainstResultsFromJan_28_2010(self):
self.ubcalc.start_new('test')
self.ubcalc.set_lattice('Pt531', 6.204, 4.806, 23.215, 90, 90, 49.8)
self.ubcalc.add_reflection(Pt531_HKL0[0], Pt531_HKL0[1], Pt531_HKL0[2],
willmott_to_you_fixed_mu_chi(Pt531_REF0),
12.39842 / Pt531_WAVELENGTH,
'ref0', None)
self.ubcalc.add_reflection(Pt531_HKL1[0], Pt531_HKL1[1], Pt531_HKL1[2],
willmott_to_you_fixed_mu_chi(Pt531_REF1),
12.39842 / Pt531_WAVELENGTH,
'ref1', None)
self.ubcalc.calculate_UB()
print "U: ", self.ubcalc.U
print "UB: ", self.ubcalc.UB
matrixeq_(self.ubcalc.U, Pt531_U_DIFFCALC)
class Test_Pt531_FixedMuChi(_BaseTest):
def setup_method(self):
_BaseTest.setup_method(self)
self._configure_constraints()
self.wavelength = Pt531_WAVELENGTH
CUT = CrystalUnderTest('Pt531', 6.204, 4.806, 23.215, 90, 90, 49.8)
B = CUT.B
self.UB = Pt531_U_DIFFCALC * B
self._configure_limits()
def _configure_constraints(self):
self.constraints._constrained = {'alpha': 2 * TORAD, 'mu': 0,
'chi': pi / 2}
def _configure_limits(self):
self.mock_hardware.set_lower_limit(NUNAME, None)
#self.mock_hardware.set_lower_limit('delta', None)
self.mock_hardware.set_upper_limit('delta', 90)
self.mock_hardware.set_lower_limit('mu', None)
self.mock_hardware.set_lower_limit('eta', None)
self.mock_hardware.set_lower_limit('chi', None)
def _convert_willmott_pos(self, willmott_pos):
return willmott_to_you_fixed_mu_chi(willmott_pos)
def _configure_ub(self):
self.mock_ubcalc.UB = self.UB
def _check(self, hkl, pos, virtual_expected={}, fails=False):
self._check_angles_to_hkl('', 999, 999, hkl, pos, self.wavelength,
virtual_expected)
if fails:
self._check_hkl_to_angles_fails('', 999, 999, hkl, pos,
self.wavelength, virtual_expected)
else:
self._check_hkl_to_angles('', 999, 999, hkl, pos, self.wavelength,
virtual_expected)
def testHkl_0_found_orientation_setting(self):
'''Check that the or0 reflection maps back to the assumed hkl'''
self.places = 1
self._check_angles_to_hkl('', 999, 999, Pt531_HKL0,
self._convert_willmott_pos(Pt531_REF0),
self.wavelength, {'alpha': 2})
def testHkl_1_found_orientation_setting(self):
'''Check that the or1 reflection maps back to the assumed hkl'''
self.places = 0
self._check_angles_to_hkl('', 999, 999, Pt531_HKL1,
self._convert_willmott_pos(Pt531_REF1),
self.wavelength, {'alpha': 2})
def testHkl_0_calculated_from_DDIF(self):
self.places = 7
pos_expected = self._convert_willmott_pos(Pt531_REF0)
self._check(Pt531_HKL0,
pos_expected,
{'alpha': 2})
def testHkl_1_calculated_from_DDIF(self):
self.places = 7
self._check(Pt531_HKL1,
self._convert_willmott_pos(Pt531_REF1),
{'alpha': 2})
def testHkl_2_calculated_from_DDIF(self):
self.places = 7
self._check(Pt531_HKL2,
self._convert_willmott_pos(Pt531_REF2),
{'alpha': 2})
def testHkl_2_m1_0_16(self):
self.places = 7
pos = WillPos(delta=25.7990976, gamma=-6.2413545, omegah=2,
phi=47.4624380)
# pos.phi -= 360
self._check((-1, 0, 16),
self._convert_willmott_pos(pos),
{'alpha': 2})
class Test_Pt531_Fixed_Mu_eta_(Test_Pt531_FixedMuChi):
def _configure_constraints(self):
self.constraints._constrained = {'alpha': 2 * TORAD, 'mu': -pi / 2,
'eta': 0}
def _convert_willmott_pos(self, willmott_pos):
return willmott_to_you_fixed_mu_eta(willmott_pos)
def testHkl_1_calculated_from_DDIF(self):
self.places = 7
self._check(Pt531_HKL1,
self._convert_willmott_pos(Pt531_REF12),
{'alpha': 2})
def testHkl_2_calculated_from_DDIF(self):
self.places = 7
self._check(Pt531_HKL2,
self._convert_willmott_pos(Pt531_REF22),
{'alpha': 2})
def testHkl_2_m1_0_16(self):
self.places = 7
pos = WillPos(delta=25.7990976, gamma=6.2413545, omegah=2,
phi=-47.4949600)
# pos.phi -= 360
self._check((-1, 0, 16),
self._convert_willmott_pos(pos),
{'alpha': 2})
| DiamondLightSource/diffcalc | test/diffcalc/hkl/you/test_calc_surface.py | Python | gpl-3.0 | 20,799 | ["CRYSTAL"] | 04a308f71e8b82a4ce11067d15dd506960013cc558ac70c006f1d0099acd4481 |
# This file is part of PyEMMA.
#
# Copyright (c) 2016, 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import itertools
from logging import getLogger
import numpy as np
from pyemma.coordinates import source
from pyemma.coordinates.data import FeatureReader
from pyemma.coordinates.data.fragmented_trajectory_reader import FragmentedTrajectoryReader
from pyemma.coordinates.data.util.reader_utils import (copy_traj_attributes as _copy_traj_attributes,
preallocate_empty_trajectory as _preallocate_empty_trajectory,
enforce_top as _enforce_top)
__all__ = ['frames_from_files']
log = getLogger(__name__)
def frames_from_files(files, top, frames, chunksize=1000, stride=1, verbose=False, copy_not_join=None, reader=None):
"""
Constructs a Trajectory object out of given frames collected from files (or given reader).
:param files: source files
:param top: topology file
:param frames: indices
    :param chunksize: number of frames to read per chunk while iterating
    :param stride: stride with which the given frame indices are interpreted
        (each index is multiplied by it)
    :param verbose: emit extra log output if True
    :param copy_not_join: not used
    :param reader: if a reader is given, ignore files and top param!
    :return: mdtraj.Trajectory consisting of the frames at the given indices.
"""
# Enforce topology to be a md.Topology object
if reader is None:
top = _enforce_top(top)
reader_given = False
else:
if not reader.number_of_trajectories():
raise ValueError("need at least one trajectory file in reader.")
if isinstance(reader, FragmentedTrajectoryReader):
top = reader._readers[0][0].featurizer.topology
elif isinstance(reader, FeatureReader):
top = reader.featurizer.topology
else:
raise ValueError("unsupported reader (only md readers).")
reader_given = True
stride = int(stride)
frames = np.array(frames)
# only one file, so we expect frames to be a one dimensional array
if isinstance(files, str):
files = [files]
if frames.ndim == 1:
# insert a constant column for file index
frames = np.insert(np.atleast_2d(frames), 0, np.zeros_like(frames), axis=0).T
if stride != 1:
frames[:, 1] *= stride
if verbose:
            log.info('A stride value of %u was parsed, '
                     'interpreting "indexes" accordingly.' % stride)
# sort by file and frame index
sort_inds = np.lexsort((frames[:, 1], frames[:, 0]))
sorted_inds = frames[sort_inds]
assert len(sorted_inds) == len(frames)
file_inds_unique = np.unique(sorted_inds[:, 0])
# construct reader
if reader is None:
# filter out files, we would never read, because no indices are pointing to them
reader = source(np.array(files)[file_inds_unique].tolist(), top=top)
# re-map indices to reflect filtered files:
for itraj, c in zip(file_inds_unique, itertools.count(0)):
mask = sorted_inds[:, 0] == itraj
sorted_inds[mask, 0] = c
inds_to_check = np.arange(len(file_inds_unique))
else:
inds_to_check = file_inds_unique
# sanity check of indices
for itraj in inds_to_check:
inds_by_traj = sorted_inds[sorted_inds[:, 0] == itraj][:, 1]
assert inds_by_traj.ndim == 1
largest_ind_in_traj = np.max(inds_by_traj)
length = reader.trajectory_length(itraj)
if largest_ind_in_traj >= length:
raise ValueError("largest specified index ({largest_without_stride} * stride="
"{largest_without_stride} * {stride}={largest}) "
"is larger than trajectory length '{filename}' = {length}".format(
largest_without_stride=largest_ind_in_traj / stride,
stride=stride,
largest=largest_ind_in_traj,
filename=reader.filenames[itraj],
length=length))
def set_reader_return_traj_objects(reader, flag):
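        # Toggle whether the underlying readers yield md.Trajectory objects
        # instead of raw coordinate arrays; for a FragmentedTrajectoryReader
        # the flag is pushed down to every wrapped FeatureReader.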
if isinstance(reader, FeatureReader):
reader._return_traj_obj = flag
elif isinstance(reader, FragmentedTrajectoryReader):
for file in reader.filenames_flat:
r = reader.reader_by_filename(file)
if isinstance(r, FeatureReader):
r = [r]
for _r in r:
_r._return_traj_obj = flag
try:
# If the reader got passed in, it could have the data already mapped to memory.
# In this case, we cannot force it to return trajectory objects, so we have to re-create it.
if reader.in_memory:
reader = source(reader.filenames, top=top, chunksize=chunksize)
# we want the FeatureReader to return mdtraj.Trajectory objects
set_reader_return_traj_objects(reader, True)
it = reader.iterator(chunk=chunksize, stride=sorted_inds, return_trajindex=False)
with it:
collected_frames = [f for f in it]
dest = _preallocate_empty_trajectory(top, len(frames))
t = 0
for chunk in collected_frames:
_copy_traj_attributes(dest, chunk, t)
t += len(chunk)
# reshuffle the indices of the final trajectory object to obtain the desired order
dest = dest.slice(sort_inds.argsort(), copy=False)
finally:
# in any case we want to reset the reader to its previous state (return features, instead of md.Trajectory)
if reader_given:
set_reader_return_traj_objects(reader, False)
return dest
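# A minimal usage sketch (hypothetical file names): collect frame 0 and frame
# 10 of the first trajectory and frame 5 of the second into one md.Trajectory.
#
#   pairs = np.array([[0, 0], [0, 10], [1, 5]])  # (file index, frame index)
#   traj = frames_from_files(['traj0.xtc', 'traj1.xtc'], 'top.pdb', pairs)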
| marscher/PyEMMA | pyemma/coordinates/data/util/frames_from_file.py | Python | lgpl-3.0 | 6,378 | ["MDTraj"] | 908dd56913ed059b1eb0f3dd983ee2d2f1f3facf7a453939ca9c8999ad458cd0 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`screen` module provides management functionality for a machine's
displays.
"""
import logging
import copy
from PyQt4 import QtCore
from openlp.core.lib import Registry, translate
log = logging.getLogger(__name__)
class ScreenList(object):
"""
Wrapper to handle the parameters of the display screen.
To get access to the screen list call ``ScreenList()``.
"""
log.info('Screen loaded')
__instance__ = None
def __new__(cls):
"""
Re-implement __new__ to create a true singleton.
"""
if not cls.__instance__:
cls.__instance__ = object.__new__(cls)
return cls.__instance__
@classmethod
def create(cls, desktop):
"""
Initialise the screen list.
``desktop``
A ``QDesktopWidget`` object.
"""
screen_list = cls()
screen_list.desktop = desktop
screen_list.preview = None
screen_list.current = None
screen_list.override = None
screen_list.screen_list = []
screen_list.display_count = 0
screen_list.screen_count_changed()
screen_list.load_screen_settings()
desktop.resized.connect(screen_list.screen_resolution_changed)
desktop.screenCountChanged.connect(screen_list.screen_count_changed)
return screen_list
def screen_resolution_changed(self, number):
"""
Called when the resolution of a screen has changed.
``number``
The number of the screen, which size has changed.
"""
log.info('screen_resolution_changed %d' % number)
for screen in self.screen_list:
if number == screen['number']:
newScreen = {
'number': number,
'size': self.desktop.screenGeometry(number),
'primary': self.desktop.primaryScreen() == number
}
self.remove_screen(number)
self.add_screen(newScreen)
# The screen's default size is used, that is why we have to
# update the override screen.
if screen == self.override:
self.override = copy.deepcopy(newScreen)
self.set_override_display()
Registry().execute('config_screen_changed')
break
def screen_count_changed(self, changed_screen=-1):
"""
Called when a screen has been added or removed.
``changed_screen``
The screen's number which has been (un)plugged.
"""
# Do not log at start up.
if changed_screen != -1:
log.info('screen_count_changed %d' % self.desktop.screenCount())
# Remove unplugged screens.
for screen in copy.deepcopy(self.screen_list):
if screen['number'] == self.desktop.screenCount():
self.remove_screen(screen['number'])
# Add new screens.
for number in range(self.desktop.screenCount()):
if not self.screen_exists(number):
self.add_screen({
'number': number,
'size': self.desktop.screenGeometry(number),
'primary': (self.desktop.primaryScreen() == number)
})
# We do not want to send this message at start up.
if changed_screen != -1:
# Reload setting tabs to apply possible changes.
Registry().execute('config_screen_changed')
def get_screen_list(self):
"""
Returns a list with the screens. This should only be used to display
available screens to the user::
[u'Screen 1 (primary)', u'Screen 2']
"""
screen_list = []
for screen in self.screen_list:
screen_name = '%s %d' % (translate('OpenLP.ScreenList', 'Screen'), screen['number'] + 1)
if screen['primary']:
screen_name = '%s (%s)' % (screen_name, translate('OpenLP.ScreenList', 'primary'))
screen_list.append(screen_name)
return screen_list
def add_screen(self, screen):
"""
Add a screen to the list of known screens.
``screen``
A dict with the screen properties::
{
u'primary': True,
u'number': 0,
u'size': PyQt4.QtCore.QRect(0, 0, 1024, 768)
}
"""
log.info('Screen %d found with resolution %s', screen['number'], screen['size'])
if screen['primary']:
self.current = screen
self.override = copy.deepcopy(self.current)
self.screen_list.append(screen)
self.display_count += 1
def remove_screen(self, number):
"""
Remove a screen from the list of known screens.
``number``
The screen number (int).
"""
log.info('remove_screen %d' % number)
for screen in self.screen_list:
if screen['number'] == number:
self.screen_list.remove(screen)
self.display_count -= 1
break
def screen_exists(self, number):
"""
Confirms a screen is known.
``number``
The screen number (int).
"""
for screen in self.screen_list:
if screen['number'] == number:
return True
return False
def set_current_display(self, number):
"""
Set up the current screen dimensions.
``number``
The screen number (int).
"""
log.debug('set_current_display %s', number)
if number + 1 > self.display_count:
self.current = self.screen_list[0]
else:
self.current = self.screen_list[number]
self.preview = copy.deepcopy(self.current)
self.override = copy.deepcopy(self.current)
if self.display_count == 1:
self.preview = self.screen_list[0]
def set_override_display(self):
"""
Replace the current size with the override values, as the user wants to
have their own screen attributes.
"""
log.debug('set_override_display')
self.current = copy.deepcopy(self.override)
self.preview = copy.deepcopy(self.current)
def reset_current_display(self):
"""
        Reset the current display to the real screen attributes, discarding
        any user override.
"""
log.debug('reset_current_display')
self.set_current_display(self.current['number'])
def which_screen(self, window):
"""
Return the screen number that the centre of the passed window is in.
``window``
A QWidget we are finding the location of.
"""
x = window.x() + (window.width() // 2)
y = window.y() + (window.height() // 2)
for screen in self.screen_list:
size = screen['size']
if x >= size.x() and x <= (size.x() + size.width()) and y >= size.y() and y <= (size.y() + size.height()):
return screen['number']
def load_screen_settings(self):
"""
Loads the screen size and the monitor number from the settings.
"""
from openlp.core.lib import Settings
# Add the screen settings to the settings dict. This has to be done here due to cyclic dependency.
# Do not do this anywhere else.
screen_settings = {
'core/x position': self.current['size'].x(),
'core/y position': self.current['size'].y(),
'core/monitor': self.display_count - 1,
'core/height': self.current['size'].height(),
'core/width': self.current['size'].width()
}
Settings.extend_default_settings(screen_settings)
settings = Settings()
settings.beginGroup('core')
monitor = settings.value('monitor')
self.set_current_display(monitor)
self.display = settings.value('display on monitor')
override_display = settings.value('override position')
x = settings.value('x position')
y = settings.value('y position')
width = settings.value('width')
height = settings.value('height')
self.override['size'] = QtCore.QRect(x, y, width, height)
self.override['primary'] = False
settings.endGroup()
if override_display:
self.set_override_display()
else:
self.reset_current_display()
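# A minimal usage sketch (assuming a running Qt application; PyQt4 as used
# above):
#
#   from PyQt4 import QtGui
#   app = QtGui.QApplication([])
#   screens = ScreenList.create(app.desktop())
#   print screens.get_screen_list()  # e.g. [u'Screen 1 (primary)']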
| marmyshev/item_title | openlp/core/lib/screen.py | Python | gpl-2.0 | 10,667 | ["Brian"] | bb3603557d621a7e6aeb9567ccf3fc5fc7528ce23eb9888c812afdb6b04d0d6c |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = 'CwT'
import urllib.parse as urlparse
import logging
IGNORED_EXTENSIONS = [
# images
'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',
'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg', 'ico',
# audio
'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',
# video
'3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',
'm4a',
# other
'css', 'pdf', 'doc', 'exe', 'bin', 'rss', 'zip', 'rar', 'js', 'xml',
]
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class UrlEliminator(object):
"""URL去重模块
去除无需扫描或者重复的页面:
1. 后缀为jpg等图片、视频页面
2. 通过正则匹配去除重复页面
"""
def __init__(self, entry=None, setting=None):
self.visited = set()
self.setting = setting
if entry:
self.entry = entry
self.visited.add(entry)
def visit(self, url):
_url = self._urlRegexize(url)
# logger.debug("[+]Regexized url %s-->%s" % (url, _url))
if len(self.visited) == 0:
self.entry = url
self.visited.add(_url)
return True
if self.setting and not self.setting.nocheckhost and \
not self._checkSameHost(self.entry, url):
# logger.debug("It's not the same host %s" % url)
return False
if any(_url.endswith(".%s" % each) for each in IGNORED_EXTENSIONS):
            # note: we don't need to worry too much if a url that ends with an ignored
            # extension turns out to be a normal page, since by this point we have
            # reduced the original url to a more concise pattern
return False
if _url in self.visited:
return False
self.visited.add(_url)
return True
def _checkSameHost(self, *urls):
if not urls:
return None
elif len(urls) == 1:
return True
else:
return all(urlparse.urlparse(url or "").netloc.split(':')[0] ==
urlparse.urlparse(urls[0] or "").netloc.split(':')[0] for url in urls[1:])
def _urlRegexize(self, url):
# scheme://netloc/path;parameters?query#fragment
# http://video.sina.com.cn/ent/s/h/2010-01-10/163961994.html?a=1&b=10
# --> http://video.sina.com.cn/ent/s/h/d+-d+-d+/d+.html?a=&b=
comp = urlparse.urlparse(url)
path = comp.path
i, start = 0, -1
result = ''
while i < len(path):
if '0' <= path[i] <= '9':
start = i if start == -1 else start
            elif start != -1:
                result += r"\d+"
                start = -1
                continue
else:
result += path[i]
i += 1
        if start != -1:
            result += r"\d+"
path = result
query = ''
for key in urlparse.parse_qs(comp.query).keys():
if query != '': query += '&'
query += (key+'=')
# TODO: exclude params????
return urlparse.urlunparse((comp.scheme, comp.netloc, path, comp.params, query, ""))
def display(self):
for url in self.visited:
logger.debug(url)
# if __name__ == '__main__':
# eliminator = UrlEliminator()
# print(eliminator._urlRegexize("http://video.sina.com.cn/ent/s/h/2010-01-10/163961994.php?a=1&b=10"))
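#     # expected (hypothetical output; digit runs collapse to \d+, query
#     # values are dropped and the fragment is removed):
#     # http://video.sina.com.cn/ent/s/h/\d+-\d+-\d+/\d+.php?a=&b=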
| CvvT/crawler_sqlmap | crawler/util/urleliminate.py | Python | apache-2.0 | 3,493 | ["VisIt"] | 8412d01fa82a0c40d0d877bd35ac3fcb3a0531634bce997d2c59cef88a396d9e |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from urlparse import urlparse
from django.db import migrations
from django.conf import settings
import os
from pyaxiom.netcdf import EnhancedDataset, EnhancedMFDataset
def path(dataset):
if urlparse(dataset.uri).scheme == "" and not dataset.uri.startswith("/"):
# We have a relative path, make it absolute to the sciwms directory.
return str(os.path.realpath(os.path.join(settings.PROJECT_ROOT, dataset.uri)))
else:
return str(dataset.uri)
def netcdf4_dataset(dataset):
    try:
        return EnhancedDataset(path(dataset))
    except Exception:
        try:
            return EnhancedMFDataset(path(dataset), aggdim='time')
        except Exception:
            return None
def make_vector_layer(apps, us, vs, std_name, style, dataset_id):
VirtualLayer = apps.get_model('wms', 'VirtualLayer')
Style = apps.get_model('wms', 'Style')
for u in us:
for v in vs:
if u.standard_name.split('_')[1:] == v.standard_name.split('_')[1:]:
try:
vl = VirtualLayer.objects.create(var_name='{},{}'.format(u._name, v._name),
std_name=std_name,
description="U ({}) and V ({}) vectors".format(u._name, v._name),
dataset_id=dataset_id)
vl.styles.add(Style.objects.get(colormap='jet', image_type=style))
vl.save()
break
except:
raise
def forward(apps, schema_editor):
Dataset = apps.get_model('wms', 'Dataset')
VirtualLayer = apps.get_model('wms', 'VirtualLayer')
# clean slate
VirtualLayer.objects.all().delete()
for dataset in Dataset.objects.all():
nc = netcdf4_dataset(dataset)
if nc is not None:
# Earth Projected Sea Water Velocity
u_names = ['eastward_sea_water_velocity', 'eastward_sea_water_velocity_assuming_no_tide']
v_names = ['northward_sea_water_velocity', 'northward_sea_water_velocity_assuming_no_tide']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
make_vector_layer(apps, us, vs, 'sea_water_velocity', 'vectors', dataset.id)
# Grid projected Sea Water Velocity
u_names = ['x_sea_water_velocity', 'grid_eastward_sea_water_velocity']
v_names = ['y_sea_water_velocity', 'grid_northward_sea_water_velocity']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
make_vector_layer(apps, us, vs, 'grid_sea_water_velocity', 'vectors', dataset.id)
# Earth projected Winds
u_names = ['eastward_wind']
v_names = ['northward_wind']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
make_vector_layer(apps, us, vs, 'winds', 'barbs', dataset.id)
# Grid projected Winds
        u_names = ['x_wind', 'grid_eastward_wind']
        v_names = ['y_wind', 'grid_northward_wind']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
make_vector_layer(apps, us, vs, 'grid_winds', 'barbs', dataset.id)
# Earth projected Ice velocity
u_names = ['eastward_sea_ice_velocity']
v_names = ['northward_sea_ice_velocity']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
make_vector_layer(apps, us, vs, 'sea_ice_velocity', 'vectors', dataset.id)
nc.close()
def reverse(apps, schema_editor):
VirtualLayer = apps.get_model('wms', 'VirtualLayer')
VirtualLayer.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('wms', '0020_add_back_virtuallayer'),
]
operations = [
migrations.RunPython(forward, reverse_code=reverse),
]
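# A short note on how this data migration runs (standard Django behaviour, not
# project-specific): applying the migration calls forward(apps, schema_editor),
# which rebuilds every VirtualLayer from the datasets' NetCDF variables, while
# migrating backwards calls reverse(apps, schema_editor), which simply deletes
# all VirtualLayer rows.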
| ayan-usgs/sci-wms | wms/migrations/0021_auto_20150429_1429.py | Python | gpl-3.0 | 4,468 | ["NetCDF"] | 3ed97202ed319c7453b326ba054edc03f379d87fe62468833e2111ececfbc884 |
import types
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from RESTDIRAC.RESTSystem.DB.OATokenDB import OATokenDB
from RESTDIRAC.RESTSystem.Client import OAToken
__RCSID__ = "$Id$"
class OATokenStoreHandler( RequestHandler ):
@classmethod
def initializeHandler( cls, serviceInfoDict ):
try:
cls.tokenDB = OATokenDB()
except RuntimeError, excp:
return S_ERROR( "Could not connect to DB: %s" % excp )
result = cls.tokenDB._getConnection()
if not result[ 'OK' ]:
cls.log.warn( "Could not connect to OAtokenDB (%s). Resorting to RPC" % result[ 'Message' ] )
result[ 'Value' ].close()
#Try to do magic
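    # For every read-only client method (name starting with "get" or
    # "generate") that this handler does not define itself, attach the
    # auth_*/types_*/export_* attributes that DISET expects, forwarding the
    # call to a local OAToken instance via __mimeticFunction.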
myStuff = dir( cls )
for method in OAToken.__remoteMethods__:
if method.find( "get" ) != 0 and method.find( "generate" ) != 0:
continue
if "export_%s" % method in myStuff:
cls.log.info( "Wrapping method %s. It's already defined in the Handler" % method )
else:
cls.log.info( "Mimicking method %s" % method )
setattr( cls, "auth_%s" % method, [ 'authenticated' ] )
setattr( cls, "types_%s" % method, [ types.TupleType ] )
setattr( cls, "export_%s" % method, cls.__mimeticFunction )
return S_OK()
def __unwrapArgs( self, margs ):
if len( margs ) < 1 or type( margs[0] ) != types.TupleType or ( len( margs ) > 1 and type( margs[1] ) != types.DictType ):
return S_ERROR( "Invalid arg stub. Expected tuple( args, kwargs? ), received %s" % str( margs ) )
if len( margs ) == 1:
return S_OK( ( margs[0], {} ) )
else:
return S_OK( ( margs[0], margs[1] ) )
def __mimeticFunction( self, margs ):
method = self.srv_getActionTuple()[1]
result = self.__unwrapArgs( margs )
if not result[ 'OK' ]:
return result
args, kwargs = result[ 'Value' ]
#DO PROPER AUTHENTICATION
if not self.__clientHasAccess():
return S_ERROR( "You're not authorized to access tokens" )
return getattr( self.__getOAToken(), method )( *args, **kwargs )
def __unwrapAndCall( self, margs ):
method = self.srv_getActionTuple()[1]
result = self.__unwrapArgs( margs )
if not result[ 'OK' ]:
return result
args, kwargs = result[ 'Value' ]
if not self.__clientHasAccess():
return S_ERROR( "You're not authorized to access tokens" )
return getattr( self, "_usr_def_%s" % method )( *args, **kwargs )
def __getOAToken( self ):
return OAToken.OAToken( forceLocal = True )
def __clientHasAccess( self ):
return True
| DIRACGrid/RESTDIRAC | RESTSystem/Service/OATokenStoreHandler.py | Python | gpl-3.0 | 2,562 | ["DIRAC"] | bee84492dbc285d20f3d2cc235eb1790365791a34463c67d282d156f5badd671 |
#!/usr/bin/env python
'''
PBC-SOC integrals
'''
from pyscf.pbc import gto
cell = gto.Cell()
cell.atom='''
C 0.000000000000 0.000000000000 0.000000000000
C 1.685068664391 1.685068664391 1.685068664391
'''
cell.basis = 'ccpvdz'
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.build()
#
# 1-center approximation
#
def get_1c_pvxp(cell, kpts=None):
import numpy
atom_slices = cell.offset_nr_by_atom()
nao = cell.nao_nr()
mat_soc = numpy.zeros((3,nao,nao))
for ia in range(cell.natm):
ish0, ish1, p0, p1 = atom_slices[ia]
shls_slice = (ish0, ish1, ish0, ish1)
with cell.with_rinv_as_nucleus(ia):
z = -cell.atom_charge(ia)
# Apply Koseki effective charge on z?
w = z * cell.intor('int1e_prinvxp', comp=3, shls_slice=shls_slice)
mat_soc[:,p0:p1,p0:p1] = w
return mat_soc
#
# SOC with lattice summation (G != 0)
#
def get_pbc_pvxp(cell, kpts=None):
import numpy
import copy
import time
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc import tools
from pyscf.gto import mole
from pyscf.pbc.df import ft_ao
from pyscf.pbc.df import aft_jk
from pyscf.pbc.df import aft
if kpts is None:
kpts_lst = numpy.zeros((1,3))
else:
kpts_lst = numpy.reshape(kpts, (-1,3))
log = logger.Logger(cell.stdout, cell.verbose)
t1 = t0 = (time.clock(), time.time())
mydf = aft.AFTDF(cell, kpts)
mydf.eta = 0.2
ke_guess = aft.estimate_ke_cutoff_for_eta(cell, mydf.eta, cell.precision)
mydf.mesh = tools.cutoff_to_mesh(cell.lattice_vectors(), ke_guess)
log.debug('mydf.mesh %s', mydf.mesh)
nkpts = len(kpts_lst)
nao = cell.nao_nr()
nao_pair = nao * (nao+1) // 2
Gv, Gvbase, kws = cell.get_Gv_weights(mydf.mesh)
charge = -cell.atom_charges() # Apply Koseki effective charge?
kpt_allow = numpy.zeros(3)
coulG = tools.get_coulG(cell, kpt_allow, mesh=mydf.mesh, Gv=Gv)
coulG *= kws
if mydf.eta == 0:
soc_mat = numpy.zeros((nkpts,3,nao*nao), dtype=numpy.complex128)
SI = cell.get_SI(Gv)
vG = numpy.einsum('i,ix->x', charge, SI) * coulG
else:
nuccell = copy.copy(cell)
half_sph_norm = .5/numpy.sqrt(numpy.pi)
norm = half_sph_norm/mole.gaussian_int(2, mydf.eta)
chg_env = [mydf.eta, norm]
ptr_eta = cell._env.size
ptr_norm = ptr_eta + 1
chg_bas = [[ia, 0, 1, 1, 0, ptr_eta, ptr_norm, 0] for ia in range(cell.natm)]
nuccell._atm = cell._atm
nuccell._bas = numpy.asarray(chg_bas, dtype=numpy.int32)
nuccell._env = numpy.hstack((cell._env, chg_env))
soc_mat = mydf._int_nuc_vloc(nuccell, kpts_lst, 'int3c2e_pvxp1_sph',
aosym='s1', comp=3)
soc_mat = numpy.asarray(soc_mat).reshape(nkpts,3,nao**2)
t1 = log.timer_debug1('pnucp pass1: analytic int', *t1)
aoaux = ft_ao.ft_ao(nuccell, Gv)
vG = numpy.einsum('i,xi->x', charge, aoaux) * coulG
max_memory = max(2000, mydf.max_memory-lib.current_memory()[0])
for aoaoks, p0, p1 in mydf.ft_loop(mydf.mesh, kpt_allow, kpts_lst,
max_memory=max_memory, aosym='s1',
intor='GTO_ft_pxp_sph', comp=3):
for k, aoao in enumerate(aoaoks):
aoao = aoao.reshape(3,-1,nao**2)
if aft_jk.gamma_point(kpts_lst[k]):
soc_mat[k] += numpy.einsum('k,ckx->cx', vG[p0:p1].real, aoao.real)
soc_mat[k] += numpy.einsum('k,ckx->cx', vG[p0:p1].imag, aoao.imag)
else:
soc_mat[k] += numpy.einsum('k,ckx->cx', vG[p0:p1].conj(), aoao)
t1 = log.timer_debug1('contracting pnucp', *t1)
soc_mat_kpts = []
for k, kpt in enumerate(kpts_lst):
if aft_jk.gamma_point(kpt):
soc_mat_kpts.append(soc_mat[k].real.reshape(3,nao,nao))
else:
soc_mat_kpts.append(soc_mat[k].reshape(3,nao,nao))
if kpts is None or numpy.shape(kpts) == (3,):
soc_mat_kpts = soc_mat_kpts[0]
return numpy.asarray(soc_mat_kpts)
soc_pbc = get_pbc_pvxp(cell)
soc_1c = get_1c_pvxp(cell)
print('PBC and 1-center SOC difference', abs(soc_pbc - soc_1c).max())
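#
# A hedged usage sketch, not part of the original example: with explicit
# k-points the lattice-sum version returns one (3, nao, nao) matrix per
# k-point. make_kpts is the standard pyscf Cell API for a Monkhorst-Pack mesh.
#
# kpts = cell.make_kpts([2, 2, 2])
# soc_k = get_pbc_pvxp(cell, kpts)   # expected shape: (8, 3, nao, nao)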
| gkc1000/pyscf | examples/pbc/33-soc_integrals.py | Python | apache-2.0 | 4,376 | ["PySCF"] | d7b437b28d4369647d5d88f820315d9edf91178f37769cd16c5949e89d932d79 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import funannotate.library as lib
from funannotate.aux_scripts.fasta2agp import parse_scaffolds_makeagp
from pkg_resources import parse_version
import sys
import os
import subprocess
import shutil
import argparse
import re
import uuid
from natsort import natsorted
import warnings
from Bio import SeqIO
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from Bio import SearchIO
def MEROPSBlast(input, cpus, evalue, tmpdir, output, diamond=True):
# run blastp against merops
blast_tmp = os.path.join(tmpdir, 'merops.xml')
if diamond:
blastdb = os.path.join(FUNDB, 'merops.dmnd')
cmd = ['diamond', 'blastp', '--sensitive', '--query', input, '--threads', str(cpus),
'--out', blast_tmp, '--db', blastdb, '--evalue', str(
evalue), '--max-target-seqs', '1',
'--outfmt', '5']
else:
blastdb = os.path.join(FUNDB, 'MEROPS')
cmd = ['blastp', '-db', blastdb, '-outfmt', '5', '-out', blast_tmp, '-num_threads', str(cpus),
'-max_target_seqs', '1', '-evalue', str(evalue), '-query', input]
if not os.path.isfile(blast_tmp):
lib.runSubprocess4(cmd, '.', lib.log)
# parse results
with open(output, 'w') as out:
with open(blast_tmp, 'r') as results:
for qresult in SearchIO.parse(results, "blast-xml"):
hits = qresult.hits
ID = qresult.id
num_hits = len(hits)
if num_hits > 0:
if hits[0].hsps[0].evalue > evalue:
continue
sseqid = hits[0].id
out.write("%s\tnote\tMEROPS:%s\n" % (ID, sseqid))
def SwissProtBlast(input, cpus, evalue, tmpdir, GeneDict, diamond=True):
# run blastp against uniprot
blast_tmp = os.path.join(tmpdir, 'uniprot.xml')
if diamond:
blastdb = os.path.join(FUNDB, 'uniprot.dmnd')
cmd = ['diamond', 'blastp', '--sensitive', '--query', input,
'--threads', str(cpus), '--out', blast_tmp, '--db', blastdb,
'--evalue', str(evalue), '--max-target-seqs',
'1', '--outfmt', '5']
else:
blastdb = os.path.join(FUNDB, 'uniprot')
cmd = ['blastp', '-db', blastdb, '-outfmt', '5', '-out', blast_tmp,
'-num_threads', str(cpus), '-max_target_seqs', '1',
'-evalue', str(evalue), '-query', input]
if not lib.checkannotations(blast_tmp):
lib.runSubprocess4(cmd, '.', lib.log)
# parse results
counter = 0
total = 0
with open(blast_tmp, 'r') as results:
for qresult in SearchIO.parse(results, "blast-xml"):
hits = qresult.hits
qlen = qresult.seq_len
ID = qresult.id
num_hits = len(hits)
if num_hits > 0:
length = hits[0].hsps[0].aln_span
pident = hits[0].hsps[0].ident_num / float(length)
if pident < 0.6:
continue
diff = length / float(qlen)
if diff < 0.6:
continue
hdescript = hits[0].description.split(' OS=')[0]
name = hits[0].description.split('GN=')[-1]
name = name.split(' ')[0].upper()
name = name.replace('-', '')
passname = None
if not '_' in name and not ' ' in name and not '.' in name and number_present(name) and len(name) > 2 and not morethanXnumbers(name, 3):
passname = name
# need to do some filtering here of certain words
bad_words = ['(Fragment)', 'homolog', 'homolog,', 'AltName:']
# turn string into array, splitting on spaces
descript = hdescript.split(' ')
final_desc = [x for x in descript if x not in bad_words]
final_desc = ' '.join(final_desc)
total += 1
# add to GeneDict
if passname:
counter += 1
if not ID in GeneDict:
GeneDict[ID] = [
{'name': passname, 'product': final_desc,
'source': 'UniProtKB'}]
else:
GeneDict[ID].append(
{'name': passname, 'product': final_desc,
'source': 'UniProtKB'})
lib.log.info(
'{:,} valid gene/product annotations from {:,} total'.format(counter, total))
def number_present(s):
return any(i.isdigit() for i in s)
def morethanXnumbers(s, num):
count = 0
for i in s:
if number_present(i):
count += 1
if count >= num:
return True
else:
return False
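# Hedged examples for the two gene-name filters above (values invented):
#   number_present('RAD51')      -> True   (contains at least one digit)
#   morethanXnumbers('AB123', 3) -> True   (three or more digit characters)
#   morethanXnumbers('AB12', 3)  -> False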
def capfirst(x):
return x[0].upper() + x[1:]
def item2index(inputList, item):
# return the index of an item in the input list
item_index = None
for x in inputList:
if item.lower() in x.lower():
item_index = inputList.index(x)
return item_index
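# Hedged example (header names taken from the parsers below):
#   item2index(['#query', 'Preferred_name'], 'preferred_name') -> 1
# The substring match is case-insensitive and the last match wins.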
def getEggNogHeaders(input):
'''
function to get the headers from eggnog mapper annotations
web-based eggnog mapper has no header....
#web based 'guess'
0 query_name
1 seed_eggNOG_ortholog
2 seed_ortholog_evalue
3 seed_ortholog_score
4 predicted_gene_name
5 GO_terms
6 KEGG_KOs
7 BiGG_reactions
8 Annotation_tax_scope
9 OGs
10 bestOG|evalue|score
11 COG cat
12 eggNOG annot
'''
IDi, DBi, OGi, Genei, COGi, Desci = (None,)*6
with open(input, 'r') as infile:
for line in infile:
if line.startswith('#query_name'): # this is HEADER
line = line.rstrip()
headerCols = line.split('\t')
IDi = item2index(headerCols, 'query_name')
Genei = item2index(headerCols, 'predicted_gene_name')
DBi = item2index(headerCols, 'Annotation_tax_scope')
OGi = item2index(headerCols, 'OGs')
COGi = item2index(headerCols, 'COG cat')
Desci = item2index(headerCols, 'eggNOG annot')
break
    if IDi is None:  # no header line found, so have to guess ('not IDi' would also trip on index 0)
IDi, DBi, OGi, Genei, COGi, Desci = (0, 8, 9, 4, 11, 12)
return IDi, DBi, OGi, Genei, COGi, Desci, None
def getEggNogHeadersv2(input):
'''
function to get the headers from eggnog mapper annotations
web-based eggnog mapper has no header....
'''
IDi, DBi, OGi, Genei, COGi, Desci, ECi = (None,)*7
with open(input, 'r') as infile:
for line in infile:
if line.startswith('#query'): # this is HEADER
line = line.rstrip()
headerCols = line.split('\t')
IDi = item2index(headerCols, '#query')
Genei = item2index(headerCols, 'Preferred_name')
DBi = item2index(headerCols, 'eggNOG OGs')
OGi = item2index(headerCols, 'best_og_name')
COGi = item2index(headerCols, 'best_og_cat')
Desci = item2index(headerCols, 'best_og_desc')
ECi = item2index(headerCols, 'EC')
break
return IDi, DBi, OGi, Genei, COGi, Desci, ECi
def getEggNogHeadersv212(input):
'''
function to get the headers from eggnog mapper annotations
'''
IDi, DBi, OGi, Genei, COGi, Desci, ECi = (None,)*7
with open(input, 'r') as infile:
for line in infile:
if line.startswith('#query'): # this is HEADER
line = line.rstrip()
headerCols = line.split('\t')
IDi = item2index(headerCols, '#query')
Genei = item2index(headerCols, 'Preferred_name')
DBi = item2index(headerCols, 'eggNOG_OGs')
OGi = item2index(headerCols, 'max_annot_lvl')
COGi = item2index(headerCols, 'COG_category')
Desci = item2index(headerCols, 'Description')
ECi = item2index(headerCols, 'EC')
break
return IDi, DBi, OGi, Genei, COGi, Desci, ECi
def parseEggNoggMapper(input, output, GeneDict):
# try to parse header
version, prefix = getEggnogVersion(input)
lib.log.info('EggNog version parsed as {}'.format(version))
    if version and parse_version('2.0.0') < parse_version(version) < parse_version('2.0.5'):
lib.log.error('Unable to parse emapper results from v{}, please use either v1.0.3 or >=v2.0.5'.format(version))
return {}
if not prefix: # we have to guess here, sorry
prefix = 'ENOG50'
if not version: # also then we guess
version = '2.1.0'
lib.log.debug('EggNog annotation detected as emapper v{} and DB prefix {}'.format(version, prefix))
Definitions = {}
# indexes from header file
    # compare as versions, not strings (e.g. '2.1.10' sorts before '2.1.2' lexically)
    if parse_version(version) < parse_version('2.0.0'):
        IDi, DBi, OGi, Genei, COGi, Desci, ECi = getEggNogHeaders(input)
    elif parse_version(version) < parse_version('2.1.2'):
        IDi, DBi, OGi, Genei, COGi, Desci, ECi = getEggNogHeadersv2(input)
    else:
        IDi, DBi, OGi, Genei, COGi, Desci, ECi = getEggNogHeadersv212(input)
# take annotations file from eggnog-mapper and create annotations
with open(output, 'w') as out:
with open(input, 'r') as infile:
for line in infile:
line = line.replace('\n', '')
if line.startswith('#'):
continue
cols = line.split('\t')
cols = ['' if x=='-' else x for x in cols]
ID = cols[IDi]
Description = cols[Desci].split('. ')[0]
Gene = ''
if cols[Genei] not in ['', '-']:
if not '_' in cols[Genei] and not '.' in cols[Genei] and number_present(cols[Genei]) and len(cols[Genei]) > 2 and not morethanXnumbers(cols[Genei], 3):
Gene = cols[Genei]
                if parse_version(version) < parse_version('2.0.0'):
EC = None
DB = cols[DBi].split('[')[0]
OGs = cols[OGi].split(',')
NOG = ''
for x in OGs:
if DB in x:
NOG = prefix + x.split('@')[0]
COGs = cols[COGi].replace(' ', '')
                elif parse_version(version) < parse_version('2.1.2'):  # means we have v2 or greater
try:
NOG, DB = cols[OGi].split('@')
except ValueError: # means either 0 or more than 1 "best_OG" drop for now
lib.log.debug("EggNog Parse ERROR: {}".format(line))
continue
OGs = cols[DBi].split(',')
if NOG == 'seed_ortholog': # not sure if this is bug, but get second to last OG from all
NOG, DB = OGs[-2].split('@')
DB = DB.split('|')[-1]
NOG = prefix+NOG
EC = cols[ECi]
if ',' in EC: # this is least common ancestor approach
EC = os.path.commonprefix(EC.split(',')).rstrip('.')
COGs = cols[COGi].replace(' ', '')
if len(COGs) > 1:
COGs = ''.join([c + ',' for c in COGs]).rstrip(',')
else:
DB = cols[OGi]
EC = cols[ECi]
if ',' in EC: # this is least common ancestor approach
EC = os.path.commonprefix(EC.split(',')).rstrip('.')
NOG = ''
OGs = cols[DBi].split(',')
for ogx in OGs:
nog_acc, taxname = ogx.split('@')
if taxname == DB:
NOG = nog_acc
NOG = prefix+NOG
COGs = cols[COGi].replace(' ', '')
if len(COGs) > 1:
COGs = ''.join([c + ',' for c in COGs]).rstrip(',')
#print(line)
#print(ID, Gene, Description, DB, EC, NOG, COGs)
if EC and EC != '':
out.write("%s\tEC_number\t%s\n" % (ID, EC))
if NOG == '':
continue
if not NOG in Definitions:
Definitions[NOG] = Description
out.write("%s\tnote\tEggNog:%s\n" % (ID, NOG))
if COGs != '':
out.write("%s\tnote\tCOG:%s\n" % (ID, COGs))
if Gene != '':
product = Gene.lower()+'p'
product = capfirst(product)
GeneID = ID
if not GeneID in GeneDict:
GeneDict[GeneID] = [
{'name': Gene, 'product': Description, 'source': 'EggNog-Mapper'}]
else:
GeneDict[GeneID].append(
{'name': Gene, 'product': Description, 'source': 'EggNog-Mapper'})
return Definitions
def getEggnogVersion(annotfile):
# try to parse the version of eggnog mapper used
# caveat here is web eggnog has no header!
vers = None
prefix = None
with open(annotfile, 'r') as infile:
for line in infile:
line = line.rstrip()
if not line.startswith('#'):
return vers, prefix
else:
if line.startswith('# emapper version:'):
vers = line.split('emapper-')[-1].split()[0]
prefix = 'ENOG41'
if line.startswith('## emapper-'):
vers = line.split('## emapper-')[-1]
prefix = 'ENOG50'
return vers, prefix
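# Hedged examples of header lines recognized above (formats inferred from the
# branches, not copied from real emapper output):
#   '# emapper version: emapper-1.0.3 ...' -> ('1.0.3', 'ENOG41')
#   '## emapper-2.1.6'                     -> ('2.1.6', 'ENOG50')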
def get_emapper_version():
r = subprocess.Popen(['emapper.py', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate()
if 'emapper' in r[0]:
i = 0
elif 'emapper' in r[1]:
i = 1
vers = r[i].strip()
    m = re.match(r'emapper-(\S+)', vers)
if m:
vers = m.group(1)
return vers
else:
return False
def main(args):
# setup menu with argparse
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self, prog):
super(MyFormatter, self).__init__(prog, max_help_position=48)
parser = argparse.ArgumentParser(
prog='funannotate-functional.py',
usage="%(prog)s [options] -i folder --eggnog emapper.annotations --iprscan proteins.xml --cpus 12",
description='''Script that adds functional annotation to a genome.''',
epilog="""Written by Jon Palmer (2016-2017) nextgenusfs@gmail.com""",
formatter_class=MyFormatter)
parser.add_argument('-i', '--input',
help='Folder from funannotate predict.')
parser.add_argument('--genbank', help='Annotated genome in GenBank format')
parser.add_argument('--fasta', help='Genome in FASTA format')
parser.add_argument('--gff', help='GFF3 annotation file')
parser.add_argument('-o', '--out', help='Basename of output files')
parser.add_argument('--sbt', default='SBT',
                        help='NCBI submission template file (SBT)')
parser.add_argument('-s', '--species',
help='Species name (e.g. "Aspergillus fumigatus")')
parser.add_argument('-t', '--tbl2asn', default='-l paired-ends',
help='Custom parameters for tbl2asn, example: linkage and gap info')
parser.add_argument('-a', '--annotations',
help='Custom annotations, tsv 3 column file')
parser.add_argument('-m', '--mito-pass-thru', dest='mito',
help='Mitochondrial contigs pass to tbl2asn, file:mcode')
parser.add_argument('--isolate', help='Isolate name (e.g. Af293)')
parser.add_argument('--strain', help='Strain name (e.g. CEA10)')
parser.add_argument('--cpus', default=2, type=int,
help='Number of CPUs to use')
parser.add_argument('--iprscan',
help='IPR5 XML file or folder of pre-computed InterProScan results')
parser.add_argument('--antismash',
help='antiSMASH results in genbank format')
parser.add_argument('--signalp',
                        help='SignalP results calculated elsewhere')
parser.add_argument('--force', action='store_true',
help='Over-write output folder')
parser.add_argument('--phobius', help='Phobius results')
parser.add_argument('--eggnog', help='EggNog Mapper annotations')
parser.add_argument('--busco_db', default='dikarya',
help='BUSCO model database')
parser.add_argument('--p2g', help='NCBI p2g file from previous annotation')
parser.add_argument('-d', '--database',
help='Path to funannotate database, $FUNANNOTATE_DB')
parser.add_argument('--fix',
help='TSV ID GeneName Product file to over-ride automated process')
parser.add_argument('--remove',
help='TSV ID GeneName Product file to remove from annotation')
parser.add_argument('--rename', help='Rename locus tag')
parser.add_argument('--no-progress', dest='progress', action='store_false',
help='no progress on multiprocessing')
parser.add_argument('--header_length', default=16,
type=int, help='Max length for fasta headers')
parser.add_argument('--tmpdir', default='/tmp', help='volume to write tmp files')
args = parser.parse_args(args)
global parentdir, IPR2ANNOTATE, FUNDB
parentdir = os.path.join(os.path.dirname(__file__))
IPR2ANNOTATE = os.path.join(
parentdir, 'aux_scripts', 'iprscan2annotations.py')
# start here rest of script
# create log file
log_name = 'funannotate-annotate.'+str(uuid.uuid4())[-8:] + '.log'
if os.path.isfile(log_name):
os.remove(log_name)
# initialize script, log system info and cmd issue at runtime
lib.setupLogging(log_name)
cmd_args = " ".join(sys.argv)+'\n'
lib.log.debug(cmd_args)
print("-------------------------------------------------------")
lib.SystemInfo()
# get version of funannotate
version = lib.get_version()
lib.log.info("Running %s" % version)
# check dependencies
if args.antismash:
programs = ['hmmscan', 'hmmsearch', 'diamond', 'bedtools']
else:
programs = ['hmmscan', 'hmmsearch', 'diamond']
lib.CheckDependencies(programs)
# setup funannotate DB path
if args.database:
FUNDB = args.database.strip()
else:
try:
FUNDB = os.environ["FUNANNOTATE_DB"].strip()
except KeyError:
lib.log.error(
'Funannotate database not properly configured, run funannotate setup.')
sys.exit(1)
# check database sources, so no problems later
sources = [os.path.join(FUNDB, 'Pfam-A.hmm.h3p'), os.path.join(FUNDB, 'dbCAN.hmm.h3p'),
os.path.join(FUNDB, 'merops.dmnd'), os.path.join(FUNDB, 'uniprot.dmnd')]
if not all([os.path.isfile(f) for f in sources]):
lib.log.error(
'Database files not found in %s, run funannotate database and/or funannotate setup' % FUNDB)
sys.exit(1)
# check if diamond version matches database version
if not lib.CheckDiamondDB(os.path.join(FUNDB, 'merops.dmnd')):
lib.log.error(
'Diamond merops database was created with different version of diamond, please re-run funannotate setup')
sys.exit(1)
if not lib.CheckDiamondDB(os.path.join(FUNDB, 'uniprot.dmnd')):
lib.log.error(
'Diamond uniprot database was created with different version of diamond, please re-run funannotate setup')
sys.exit(1)
# write versions of Databases used to logfile
versDB = {}
if not lib.checkannotations(os.path.join(FUNDB, 'funannotate-db-info.txt')):
lib.log.error('Database not properly configured, %s missing. Run funannotate database and/or funannotate setup.' %
os.path.join(FUNDB, 'funannotate-db-info.txt'))
sys.exit(1)
with open(os.path.join(FUNDB, 'funannotate-db-info.txt'), 'r') as dbfile:
for line in dbfile:
line = line.strip()
name, type, file, version, date, num_records, mdchecksum = line.split(
'\t')
versDB[name] = version
# take care of some preliminary checks
if args.sbt == 'SBT':
SBT = os.path.join(parentdir, 'config', 'test.sbt')
        lib.log.info(
            "No NCBI SBT file given; using the default. If you plan to submit to NCBI, create one and pass it with '--sbt'")
else:
SBT = args.sbt
# check other input files
if not os.path.isfile(SBT):
lib.log.error("SBT file not found, exiting")
sys.exit(1)
if args.antismash:
if not os.path.isfile(args.antismash):
lib.log.error("Antismash GBK file not found, exiting")
sys.exit(1)
# check buscos, download if necessary
if not os.path.isdir(os.path.join(FUNDB, args.busco_db)):
lib.log.error("ERROR: %s busco database is not found, install with funannotate setup -b %s" %
(args.busco_db, args.busco_db))
sys.exit(1)
# need to do some checks here of the input
genbank, Scaffolds, Protein, Transcripts, GFF, TBL = (None,)*6
existingStats = False
GeneCounts = 0
GeneDB = {}
if not args.input:
        # no funannotate results folder was passed, so we need either GenBank or genome FASTA + GFF3, plus --out for the output folder
if not args.out:
lib.log.error(
"If you are not providing funannotate predict input folder, then you need to provide an output folder (--out)")
sys.exit(1)
else:
outputdir = args.out
if os.path.isdir(outputdir):
lib.log.error(
"Found existing output directory %s. Warning, will re-use any intermediate files found." % (outputdir))
# create outputdir and subdirs if not already present
lib.createdir(outputdir)
lib.createdir(os.path.join(outputdir, 'annotate_misc'))
lib.createdir(os.path.join(outputdir, 'annotate_results'))
lib.createdir(os.path.join(outputdir, 'logfiles'))
if not args.genbank:
if not args.fasta or not args.gff:
lib.log.error(
"You did not specifiy the apropriate input files, either: \n1) GenBank \n2) Genome FASTA + GFF3")
sys.exit(1)
else:
Scaffolds = args.fasta
GFF = args.gff
Proteins = os.path.join(outputdir, 'annotate_misc', 'genome.proteins.fa')
Transcripts = os.path.join(outputdir, 'annotate_misc', 'genome.transcripts.fasta')
annotTBL = os.path.join(outputdir, 'annotate_misc', 'genome.tbl')
prefix = None
if args.rename:
prefix = args.rename.replace('_', '')
lib.log.info(
"Parsing annotation and preparing annotation files.")
GeneCounts, GeneDB = lib.convertgff2tbl(
GFF, prefix, Scaffolds, Proteins, Transcripts, annotTBL, external=True)
else:
genbank = args.genbank
Scaffolds = os.path.join(outputdir, 'annotate_misc', 'genome.scaffolds.fasta')
Proteins = os.path.join(outputdir, 'annotate_misc', 'genome.proteins.fasta')
Transcripts = os.path.join(outputdir, 'annotate_misc', 'genome.transcripts.fasta')
GFF = os.path.join(outputdir, 'annotate_misc', 'genome.gff3')
annotTBL = os.path.join(outputdir, 'annotate_misc', 'genome.tbl')
lib.log.info("Checking GenBank file for annotation")
if not lib.checkGenBank(genbank):
lib.log.error("Found no annotation in GenBank file, exiting")
sys.exit(1)
GeneCounts = lib.gb2parts(
genbank, annotTBL, GFF, Proteins, Transcripts, Scaffolds)
else:
# should be a folder, with funannotate files, thus store results there, no need to create output folder
if not os.path.isdir(args.input):
lib.log.error("%s directory does not exist" % args.input)
sys.exit(1)
# funannotate results 1) in update folder or 2) in predict folder
if os.path.isdir(os.path.join(args.input, 'update_results')):
inputdir = os.path.join(args.input, 'update_results')
outputdir = args.input
elif os.path.isdir(os.path.join(args.input, 'predict_results')):
inputdir = os.path.join(args.input, 'predict_results')
outputdir = args.input
else:
# here user specified the predict_results folder, or it is a custom folder
inputdir = os.path.abspath(args.input)
if '_results' in inputdir: # then it is the _results dir, so move up one directory
outputdir = os.path.dirname(inputdir)
else:
lib.log.error(
'Unable to detect funannotate folder as input, please provide -o,--out directory')
sys.exit(1)
annotTBL = os.path.join(outputdir, 'annotate_misc', 'genome.tbl')
# get files that you need
for file in os.listdir(inputdir):
if file.endswith('.gbk'):
genbank = os.path.join(inputdir, file)
if file.endswith('.gff3'):
GFF = os.path.join(inputdir, file)
if file.endswith('.tbl'):
TBL = os.path.join(inputdir, file)
if file.endswith('.stats.json'):
existingStats = os.path.join(inputdir, file)
# now create the files from genbank input file for consistency in gene naming, etc
if not genbank or not GFF:
lib.log.error(
"Properly formatted 'funannotate predict' files do no exist in this directory")
sys.exit(1)
else:
# if user gave predict_results folder, then set output to up one directory
if 'predict_results' in inputdir or 'update_results' in inputdir:
outputdir = lib.get_parent_dir(inputdir)
else:
if not args.out:
outputdir = inputdir # output the results in the input directory
else:
outputdir = args.out
if not os.path.isdir(outputdir):
os.makedirs(outputdir)
# create output directories
if not os.path.isdir(os.path.join(outputdir, 'annotate_misc')):
os.makedirs(os.path.join(outputdir, 'annotate_misc'))
if not os.path.isdir(os.path.join(outputdir, 'annotate_results')):
os.makedirs(os.path.join(outputdir, 'annotate_results'))
else:
lib.log.error(
"Found existing output directory %s. Warning, will re-use any intermediate files found." % (outputdir))
lib.log.info("Parsing input files")
Scaffolds = os.path.join(outputdir, 'annotate_misc', 'genome.scaffolds.fasta')
Proteins = os.path.join(outputdir, 'annotate_misc', 'genome.proteins.fasta')
Transcripts = os.path.join(outputdir, 'annotate_misc', 'genome.transcripts.fasta')
if TBL:
lib.log.info('Existing tbl found: {:}'.format(TBL))
shutil.copyfile(TBL, annotTBL)
if not lib.checkannotations(GFF):
GFF = os.path.join(
outputdir, 'annotate_misc', 'genome.gff3')
GeneCounts = lib.gb2gffnuc(
genbank, GFF, Proteins, Transcripts, Scaffolds)
else:
GeneCounts = lib.gb2nucleotides(
genbank, Proteins, Transcripts, Scaffolds)
else:
GFF = os.path.join(outputdir, 'annotate_misc', 'genome.gff3')
GeneCounts = lib.gb2parts(
genbank, annotTBL, GFF, Proteins, Transcripts, Scaffolds)
# double check that you have a TBL file, otherwise will have nothing to append to.
if not lib.checkannotations(annotTBL):
lib.log.error("NCBI tbl file not found, exiting")
sys.exit(1)
lib.log.debug('TBL file: {}'.format(annotTBL))
if not lib.checkannotations(GFF):
lib.log.error("GFF file not found, exiting")
sys.exit(1)
lib.log.debug('GFF3 file: {}'.format(GFF))
if not lib.checkannotations(Proteins):
lib.log.error("Protein FASTA file not found, exiting")
sys.exit(1)
lib.log.debug('Proteins file: {}'.format(Proteins))
# parse prefix from tbl file for existing
locusTagPrefix = None
with open(annotTBL, 'r') as infile:
for line in infile:
if line.startswith('\t\t\tlocus_tag\t'):
prelimTag = line.split('\t')[-1].rstrip()
if '_' in prelimTag:
locusTagPrefix = prelimTag.split('_')[0]
break
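    # Hedged example of the tbl qualifier line parsed above (tabs shown as \t):
    #   '\t\t\tlocus_tag\tFUN_000001' -> locusTagPrefix = 'FUN'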
if args.rename and not locusTagPrefix:
lib.log.error('Error parsing existing locus_tag, expecting underscore "_" in locus_tag')
sys.exit(1)
# make sure logfiles directory is present, will need later
if not os.path.isdir(os.path.join(outputdir, 'logfiles')):
os.makedirs(os.path.join(outputdir, 'logfiles'))
if not os.path.isdir(os.path.join(outputdir, 'annotate_results')):
os.makedirs(os.path.join(outputdir, 'annotate_results'))
    # get absolute paths for all inputs so there are no problems later; Transcripts is not used yet, so it is left out here
Scaffolds, Proteins, GFF = [os.path.abspath(i) for i in [Scaffolds, Proteins, GFF]]
# check the genome fasta for any potential errors
bad_headers, bad_contigs, suspect_contigs = lib.analyzeAssembly(
Scaffolds, header_max=args.header_length)
if len(bad_headers) > 0 and not args.force:
lib.log.error("Genome assembly error: headers contain more characters than the max ({}), reformat headers to continue.".format(
args.header_length))
lib.log.error("First {:} headers that failed names:\n{}".format(len(bad_headers[:5]),
'\n'.join(bad_headers[:5])))
sys.exit(1)
elif len(bad_contigs) > 0:
lib.log.error('Found {:,} contigs contain non-IUPAC characters:'.format(len(bad_contigs)))
for k, v in natsorted(bad_contigs.items()):
print(k)
for x in v:
print(' {}\t{}'.format(x[0], x[1]))
lib.log.debug('{} {}'.format(k, v))
sys.exit(1)
elif len(suspect_contigs) > 0 and not args.force:
lib.log.error('Found {:,} bad contigs, where alphabet is less than 4 [this should not happen]'.format(
len(suspect_contigs)))
for k, v in natsorted(suspect_contigs.items()):
lib.log.debug('{} {}'.format(k, v))
print(k)
total = 0
for nuc, num in natsorted(v.items()):
print(' {:}: {:,}'.format(nuc, num))
total += int(num)
print('len: {:,}'.format(total))
print('-----------------------')
lib.log.info('If you really want to keep and annotate these contigs (not recommended), pass --force')
sys.exit(1)
# get organism and isolate from GBK file
organism, strain, isolate, accession, WGS_accession, gb_gi, version = (None,)*7
if genbank:
organism, strain, isolate, accession, WGS_accession, gb_gi, version = lib.getGBKinfo(
genbank)
    # since we can't find a way to propagate the WGS_accession, it is written to a file and parsed here
if os.path.isfile(os.path.join(outputdir, 'update_results', 'WGS_accession.txt')):
with open(os.path.join(outputdir, 'update_results', 'WGS_accession.txt'), 'r') as infile:
for line in infile:
line = line.replace('\n', '')
if line == 'None':
WGS_accession = None
else:
WGS_accession = line
    # if species/strain/isolate was passed at the command line, over-write what was detected
if args.species:
organism = args.species
if args.strain:
strain = args.strain
if args.isolate:
isolate = args.isolate
if not organism:
lib.log.error(
"No GenBank species and no species name given will cause problems downstream, please pass a name to -s,--species")
sys.exit(1)
if strain:
organism_name = organism+'_'+strain
elif isolate:
organism_name = organism+'_'+isolate
else:
organism_name = organism
organism_name = organism_name.replace(' ', '_')
lib.log.info("Adding Functional Annotation to %s, NCBI accession: %s" % (
organism, WGS_accession))
lib.log.info(
"Annotation consists of: {:,} gene models".format(int(GeneCounts)))
############################################################################
# start workflow here
ProtCount = lib.countfasta(Proteins)
lib.log.info('{0:,}'.format(ProtCount) + ' protein records loaded')
if ProtCount < 1:
lib.log.error("There are no gene models in this genbank file")
sys.exit(1)
# create tmpdir folder and split proteins into X CPUs to run with HMMER3 searches
protDir = os.path.join(outputdir, 'annotate_misc', 'split_prots')
if not os.path.isdir(protDir):
os.makedirs(protDir)
lib.fasta2chunks(Proteins, args.cpus, os.path.join(
outputdir, 'annotate_misc'), 'split_prots')
# run PFAM-A search
pfam_results = os.path.join(
outputdir, 'annotate_misc', 'annotations.pfam.txt')
if not lib.checkannotations(pfam_results):
lib.log.info("Running HMMer search of PFAM version %s" %
versDB.get('pfam'))
cmd = [sys.executable, os.path.join(parentdir, 'aux_scripts', 'hmmer_parallel.py'),
'-c', str(args.cpus), '-d', FUNDB, '-i', protDir,
'-o', pfam_results, '-m', 'pfam']
subprocess.call(cmd)
else:
lib.log.info('Existing Pfam-A results found: {:}'.format(pfam_results))
num_annotations = lib.line_count(pfam_results)
lib.log.info('{0:,}'.format(num_annotations) + ' annotations added')
# initiate Gene Name/Product dictionary
GeneProducts = {}
# run SwissProt Blast search
lib.log.info("Running Diamond blastp search of UniProt DB version %s" %
versDB.get('uniprot'))
blast_out = os.path.join(outputdir, 'annotate_misc',
'annotations.swissprot.txt')
SwissProtBlast(Proteins, args.cpus, 1e-5,
os.path.join(outputdir, 'annotate_misc'), GeneProducts)
# Check for EggNog annotations, parse if present
eggnog_out = os.path.join(
outputdir, 'annotate_misc', 'annotations.eggnog.txt')
eggnog_result = os.path.join(
outputdir, 'annotate_misc', 'eggnog.emapper.annotations')
egg_unique_id = str(uuid.uuid4())[-8:]
scratch_dir = os.path.join(args.tmpdir, 'emapper-{}'.format(egg_unique_id))
if args.eggnog:
if os.path.isfile(eggnog_result):
os.remove(eggnog_result)
shutil.copyfile(args.eggnog, eggnog_result)
if not lib.checkannotations(eggnog_result):
if lib.which('emapper.py'): # eggnog installed, so run it
lib.log.info("Running Eggnog-mapper")
cmd = ['emapper.py', '-m', 'diamond', '-i', Proteins,
'-o', 'eggnog', '--cpu', str(args.cpus)]
if parse_version(get_emapper_version()) >= parse_version('2.1.0'):
if not os.path.isdir(args.tmpdir):
os.makedirs(args.tmpdir)
if not os.path.isdir(scratch_dir):
os.makedirs(scratch_dir)
cmd += ['--scratch_dir', scratch_dir,
'--temp_dir', args.tmpdir]
if lib.MemoryCheck() >= 48:
cmd.append('--dbmem')
if parse_version(get_emapper_version()) >= parse_version('2.1.4'):
if parse_version(lib.getDiamondVersion()) < parse_version('2.0.11'):
cmd += ['--dmnd_iterate', 'no']
lib.runSubprocess(cmd, os.path.join(
outputdir, 'annotate_misc'), lib.log)
if os.path.isdir(scratch_dir):
shutil.rmtree(scratch_dir)
else:
lib.log.info(
"Install eggnog-mapper or use webserver to improve functional annotation: https://github.com/jhcepas/eggnog-mapper")
else:
lib.log.info(
'Existing Eggnog-mapper results found: {:}'.format(eggnog_result))
if lib.checkannotations(eggnog_result):
lib.log.info("Parsing EggNog Annotations")
EggNog = parseEggNoggMapper(eggnog_result, eggnog_out, GeneProducts)
if lib.checkannotations(eggnog_out):
num_annotations = lib.line_count(eggnog_out)
lib.log.info('{0:,}'.format(num_annotations) +
' COG and EggNog annotations added')
else:
lib.log.error("No Eggnog-mapper results found.")
EggNog = {}
RawProductNames = os.path.join(
outputdir, 'annotate_misc', 'uniprot_eggnog_raw_names.txt')
# GeneDict[ID] = [{'name': passname, 'product': final_desc}]
with open(RawProductNames, 'w') as uniprottmp:
for k, v in natsorted(list(GeneProducts.items())):
for x in v: # v is list of dictionaries
uniprottmp.write('{:}\t{:}\t{:}\t{:}\n'.format(
k, x['name'], x['product'], x['source']))
# combine the results from UniProt and Eggnog to parse Gene names and product descriptions
# load curated list
lib.log.info("Combining UniProt/EggNog gene and product names using Gene2Product version %s" %
versDB.get('gene2product'))
CuratedNames = {}
with open(os.path.join(FUNDB, 'ncbi_cleaned_gene_products.txt'), 'r') as input:
for line in input:
line = line.strip()
if line.startswith('#'):
continue
ID, product = line.split('\t')
if not ID in CuratedNames:
CuratedNames[ID] = product
GeneSeen = {}
NeedCurating = {}
NotInCurated = {}
thenots = []
for k, v in natsorted(list(GeneProducts.items())):
GeneName = None
GeneProduct = None
for x in v:
if x['name'] in CuratedNames:
GeneProduct = CuratedNames.get(x['name'])
GeneName = x['name']
elif x['name'].lower() in CuratedNames:
GeneProduct = CuratedNames.get(x['name'].lower())
GeneName = x['name']
if not GeneName: # taking first one will default to swissprot if products for both
GeneName = v[0]['name']
GeneProduct = v[0]['product']
OriginalProd = GeneProduct
thenots.append(GeneName)
# if not GeneName in NotInCurated:
# NotInCurated[GeneName] = GeneProduct
# now attempt to clean the product name
rep = {'potential': 'putative', 'possible': 'putative', 'probable': 'putative', 'predicted': 'putative',
'uncharacterized': 'putative', 'uncharacterised': 'putative', 'homolog': '', 'EC': '', 'COG': '',
'inactivated': '', 'related': '', 'family': '', 'gene': 'protein', 'homologue': '', 'open reading frame': '',
'frame': '', 'yeast': '', 'Drosophila': '', 'Yeast': '', 'drosophila': ''}
# replace words in dictionary, from https://stackoverflow.com/questions/6116978/python-replace-multiple-strings
rep = dict((re.escape(k), v) for k, v in rep.items())
pattern = re.compile("|".join(list(rep.keys())))
GeneProduct = pattern.sub(
lambda m: rep[re.escape(m.group(0))], GeneProduct)
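        # Hedged example of the multi-replace idiom above (invented input):
        #   'probable DNA repair homolog' -> 'putative DNA repair ' (extra
        #   whitespace is squashed a few lines below)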
# if gene name in product, convert to lowercase
if GeneName in GeneProduct:
GeneProduct = GeneProduct.replace(GeneName, GeneName.lower())
# check for some obvious errors, then change product description to gene name + p
if not GeneName in CuratedNames:
# some eggnog descriptions are paragraphs....
if 'By similarity' in GeneProduct or 'Required for' in GeneProduct or 'nvolved in' in GeneProduct or 'protein '+GeneName == GeneProduct or 'nherit from' in GeneProduct or len(GeneProduct) > 100:
OriginalProd = GeneProduct
GeneProduct = GeneName.lower()+'p'
GeneProduct = capfirst(GeneProduct)
if not GeneName in NeedCurating:
NeedCurating[GeneName] = [(OriginalProd, GeneProduct)]
else:
NeedCurating[GeneName].append((OriginalProd, GeneProduct))
# make sure not multiple spaces
GeneProduct = ' '.join(GeneProduct.split())
GeneProduct = GeneProduct.replace('()', '')
if '(' in GeneProduct and not ')' in GeneProduct:
GeneProduct = GeneProduct.split('(')[0].rstrip()
GeneProduct = GeneProduct.replace(' ,', ',')
# populate dictionary of NotInCurated
if GeneName in thenots:
if not GeneName in NotInCurated:
NotInCurated[GeneName] = [(OriginalProd, GeneProduct)]
else:
NotInCurated[GeneName].append((OriginalProd, GeneProduct))
if not GeneName in GeneSeen:
GeneSeen[GeneName] = [(k, GeneProduct)]
else:
GeneSeen[GeneName].append((k, GeneProduct))
# finally output the annotations
# which genes are duplicates, need to append numbers to those gene names and then finally output annotations
Gene2ProdFinal = {}
with open(os.path.join(outputdir, 'annotate_misc', 'annotations.genes-products.txt'), 'w') as gene_annotations:
for key, value in natsorted(list(GeneSeen.items())):
if len(value) > 1:
try:
testMultiple = len(set([x[0].split('-T')[0] for x in value]))
except:
testMultiple = len(value)
for i in range(0, len(value)):
if testMultiple > 1:
gene_annotations.write(
"%s\tname\t%s_%i\n" % (value[i][0], key, i+1))
else:
gene_annotations.write(
"%s\tname\t%s\n" % (value[i][0], key))
gene_annotations.write(
"%s\tproduct\t%s\n" % (value[i][0], value[i][1]))
Gene2ProdFinal[value[i][0]] = (
key+'_'+str(i+1), value[i][1])
else:
gene_annotations.write("%s\tname\t%s\n" % (value[0][0], key))
gene_annotations.write("%s\tproduct\t%s\n" %
(value[0][0], value[0][1]))
Gene2ProdFinal[value[0][0]] = (key, value[0][1])
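    # Hedged example of the dedup-numbering above: if 'RAD51' maps to two
    # different gene models they are written as RAD51_1 and RAD51_2, while a
    # single occurrence keeps the bare name.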
num_annotations = int(lib.line_count(os.path.join(
outputdir, 'annotate_misc', 'annotations.genes-products.txt')) / 2)
lib.log.info('{:,} gene name and product description annotations added'.format(
num_annotations))
# run MEROPS Blast search
blast_out = os.path.join(outputdir, 'annotate_misc',
'annotations.merops.txt')
if not lib.checkannotations(blast_out):
lib.log.info(
"Running Diamond blastp search of MEROPS version %s" % versDB.get('merops'))
MEROPSBlast(Proteins, args.cpus, 1e-5,
os.path.join(outputdir, 'annotate_misc'), blast_out)
else:
lib.log.info('Existing MEROPS results found: {:}'.format(blast_out))
num_annotations = lib.line_count(blast_out)
lib.log.info('{0:,}'.format(num_annotations) + ' annotations added')
# run dbCAN search
dbCAN_out = os.path.join(outputdir, 'annotate_misc',
'annotations.dbCAN.txt')
if not lib.checkannotations(dbCAN_out):
lib.log.info(
"Annotating CAZYmes using HMMer search of dbCAN version %s" % versDB.get('dbCAN'))
cmd = [sys.executable,
os.path.join(parentdir, 'aux_scripts', 'hmmer_parallel.py'),
'-c', str(args.cpus), '-d', FUNDB, '-i', protDir,
'-o', dbCAN_out, '-m', 'cazy']
subprocess.call(cmd)
else:
lib.log.info('Existing CAZYme results found: {:}'.format(dbCAN_out))
num_annotations = lib.line_count(dbCAN_out)
lib.log.info('{:,} annotations added'.format(num_annotations))
# run BUSCO OGS search
busco_out = os.path.join(
outputdir, 'annotate_misc', 'annotations.busco.txt')
buscoDB = os.path.join(FUNDB, args.busco_db)
if not lib.checkannotations(busco_out):
lib.log.info("Annotating proteins with BUSCO %s models" %
args.busco_db)
lib.runBUSCO(Proteins, buscoDB, args.cpus, os.path.join(
outputdir, 'annotate_misc'), busco_out)
else:
lib.log.info('Existing BUSCO2 results found: {:}'.format(busco_out))
num_annotations = lib.line_count(busco_out)
lib.log.info('{0:,}'.format(num_annotations) + ' annotations added')
# run Phobius if local is installed, otherwise you will have to use funannotate remote
phobius_out = os.path.join(
outputdir, 'annotate_misc', 'phobius.results.txt')
phobiusLog = os.path.join(outputdir, 'logfiles', 'phobius.log')
if args.phobius:
if os.path.isfile(phobius_out):
os.remove(phobius_out)
shutil.copyfile(args.phobius, phobius_out)
if not lib.checkannotations(phobius_out):
if lib.which('phobius.pl'):
if not lib.checkannotations(phobius_out):
lib.log.info(
"Predicting secreted and transmembrane proteins using Phobius")
subprocess.call([os.path.join(parentdir, 'aux_scripts', 'phobius-multiproc.py'),
'-i', Proteins, '-o', phobius_out, '-l', phobiusLog])
else:
lib.log.info(
"Skipping phobius predictions, try funannotate remote -m phobius")
else:
lib.log.info('Existing Phobius results found: {:}'.format(phobius_out))
# run signalP if installed, have to manually install, so test if exists first, then run it if it does, parse results
signalp_out = os.path.join(
outputdir, 'annotate_misc', 'signalp.results.txt')
secreted_out = os.path.join(
outputdir, 'annotate_misc', 'annotations.secretome.txt')
membrane_out = os.path.join(
outputdir, 'annotate_misc', 'annotations.transmembrane.txt')
if args.signalp:
shutil.copyfile(args.signalp, signalp_out)
if lib.which('signalp') or lib.checkannotations(signalp_out):
if not lib.checkannotations(signalp_out):
lib.log.info("Predicting secreted proteins with SignalP")
lib.signalP(Proteins, os.path.join(
outputdir, 'annotate_misc'), signalp_out)
else:
lib.log.info(
'Existing SignalP results found: {:}'.format(signalp_out))
if lib.checkannotations(phobius_out):
lib.parsePhobiusSignalP(
phobius_out, signalp_out, membrane_out, secreted_out)
else:
lib.parseSignalP(signalp_out, secreted_out)
else:
if not lib.checkannotations(phobius_out):
lib.log.info(
"Skipping secretome: neither SignalP nor Phobius searches were run")
else:
lib.log.info(
"SignalP not installed, secretome prediction less accurate using only Phobius")
lib.parsePhobiusSignalP(
phobius_out, False, membrane_out, secreted_out)
if lib.checkannotations(secreted_out):
num_secreted = lib.line_count(secreted_out)
else:
num_secreted = 0
if lib.checkannotations(membrane_out):
num_mem = lib.line_count(membrane_out)
else:
num_mem = 0
    lib.log.info('{0:,}'.format(num_secreted) + ' secretome and ' +
                 '{0:,}'.format(num_mem) + ' transmembrane annotations added')
# interproscan
IPRCombined = os.path.join(outputdir, 'annotate_misc', 'iprscan.xml')
IPR_terms = os.path.join(outputdir, 'annotate_misc',
'annotations.iprscan.txt')
if args.iprscan and args.iprscan != IPRCombined:
if os.path.isfile(IPRCombined):
os.remove(IPRCombined)
shutil.copyfile(args.iprscan, IPRCombined)
if not lib.checkannotations(IPRCombined):
lib.log.error(
"InterProScan error, %s is empty, or no XML file passed via --iprscan. Functional annotation will be lacking." % IPRCombined)
else:
if os.path.isfile(IPR_terms):
if os.path.getmtime(IPR_terms) < os.path.getmtime(IPRCombined):
os.remove(IPR_terms)
if not lib.checkannotations(IPR_terms):
lib.log.info("Parsing InterProScan5 XML file")
cmd = [sys.executable, IPR2ANNOTATE, IPRCombined, IPR_terms]
lib.runSubprocess(cmd, '.', lib.log)
# check if antiSMASH data is given, if so parse and reformat for annotations and cluster textual output
antismash_input = os.path.join(
outputdir, 'annotate_misc', 'antiSMASH.results.gbk')
if args.antismash:
if os.path.isfile(antismash_input):
os.remove(antismash_input)
shutil.copyfile(args.antismash, antismash_input)
if lib.checkannotations(antismash_input): # result found
AntiSmashFolder = os.path.join(outputdir, 'annotate_misc', 'antismash')
AntiSmashBed = os.path.join(AntiSmashFolder, 'clusters.bed')
GFF2clusters = os.path.join(AntiSmashFolder, 'secmet.clusters.txt')
AntiSmash_annotations = os.path.join(
outputdir, 'annotate_misc', 'annotations.antismash.txt')
Cluster_annotations = os.path.join(
outputdir, 'annotate_misc', 'annotations.antismash.clusters.txt')
if os.path.isdir(AntiSmashFolder):
shutil.rmtree(AntiSmashFolder)
os.makedirs(AntiSmashFolder)
# results in several dictionaries
bbDomains, bbSubType, BackBone = lib.ParseAntiSmash(antismash_input,
AntiSmashFolder,
AntiSmashBed,
AntiSmash_annotations)
# results in dictClusters dictionary
dictClusters = lib.GetClusterGenes(AntiSmashBed, GFF, Scaffolds,
Cluster_annotations)
# if custom annotations passed, parse here
'''
if args.annotations:
lib.log.info("Parsing custom annotations from %s" % args.annotations)
shutil.copyfile(args.annotations, os.path.join(
outputdir, 'annotate_misc', 'annotations.custom.txt'))
num_annotations = lib.line_count(os.path.join(
outputdir, 'annotate_misc', 'annotations.custom.txt'))
lib.log.info('{0:,}'.format(num_annotations) + ' annotations added')
'''
# now bring all annotations together and annotated genome using gag, remove any duplicate annotations
ANNOTS = os.path.join(outputdir, 'annotate_misc', 'all.annotations.txt')
GeneNames = lib.getGeneBasename(Proteins)
total_annotations = 0
filtered_annotations = 0
lines_seen = set()
with open(ANNOTS, 'w') as output:
for file in os.listdir(os.path.join(outputdir, 'annotate_misc')):
if file.startswith('annotations'):
file = os.path.join(outputdir, 'annotate_misc', file)
with open(file) as input:
for line in input:
total_annotations += 1
if not line.startswith(tuple(GeneNames)):
continue
if line.count('\t') != 2: # make sure it is 3 columns
continue
if line not in lines_seen:
output.write(line)
lines_seen.add(line)
filtered_annotations += 1
ANNOTS = os.path.abspath(ANNOTS)
diff_annotations = total_annotations - filtered_annotations
lib.log.info("Found " + '{0:,}'.format(diff_annotations) + " duplicated annotations, adding " +
'{0:,}'.format(filtered_annotations) + ' valid annotations')
# setup tbl2asn folder
if os.path.isdir(os.path.join(outputdir, 'annotate_misc', 'tbl2asn')):
lib.SafeRemove(os.path.join(outputdir, 'annotate_misc', 'tbl2asn'))
os.makedirs(os.path.join(outputdir, 'annotate_misc', 'tbl2asn'))
TBLOUT = os.path.join(outputdir, 'annotate_misc', 'tbl2asn', 'genome.tbl')
tbl2genome = os.path.join(outputdir, 'annotate_misc', 'tbl2asn', 'genome.fsa')
shutil.copyfile(Scaffolds, tbl2genome)
# check for mitochondrial genome/contigs to pass-thru
if args.mito:
if ':' in args.mito:
mitocontigs, mcode = args.mito.rsplit(':', 1)
else:
mitocontigs = args.mito
mcode = 4
if not lib.checkannotations(mitocontigs):
lib.log.error('Mitochondrial pass thru detected, but {} is not a file or is empty'.format(mitocontigs))
else:
# mcode should be an integer
try:
mcode = int(mcode)
except ValueError:
                lib.log.error('Mitochondrial pass-thru mcode {} is not an integer'.format(mcode))
if isinstance(mcode, int):
# now we can safely add to genome.fsa
with open(tbl2genome, 'a') as outfile:
with open(mitocontigs, 'r') as infile:
for rec in SeqIO.parse(infile, 'fasta'):
if 'circular' in rec.description:
topology = '[topology=circular] '
else:
topology = ''
outfile.write(
'>{} [mcode={}] {}[location=mitochondrion]\n{}\n'.format(
rec.id, mcode, topology, lib.softwrap(str(rec.seq))))
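    # Hedged example of a mitochondrial defline written above (contig name invented):
    #   >mito1 [mcode=4] [topology=circular] [location=mitochondrion]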
# add annotation to tbl annotation file, generate dictionary of dictionaries with values as a list
    # need to keep multiple transcript annotations separate, so this approach may have to be modified
# custom annotations take precedence so parse differently
Annotations = lib.annotations2dict(ANNOTS, geneDB=GeneDB,
custom=args.annotations)
# to update annotations, user can pass --fix or --remove, update Annotations here
if args.fix:
with open(args.fix, 'r') as fixfile:
for line in fixfile:
line = line.strip()
if line.startswith('#'):
continue
                # ID<tab>Name<tab>Description<tab>Error (there could be more columns)
cols = line.split('\t')
if len(cols) < 3: # skip if number of columns isn't correct
continue
if cols[0] in Annotations:
Annotations[cols[0]]['name'] = [cols[1]]
Annotations[cols[0]]['product'] = [cols[2]]
if cols[1] in NotInCurated:
NotInCurated[cols[1]] = [cols[2]]
if cols[1] in NeedCurating:
old = NeedCurating.get(cols[1])
NeedCurating[cols[1]] = (old[0], cols[2])
if cols[0] in Gene2ProdFinal:
Gene2ProdFinal[cols[0]] = (cols[1], cols[2])
if args.remove:
with open(args.remove, 'r') as removefile:
for line in removefile:
line = line.strip()
if line.startswith('#'):
continue
cols = line.split('\t')
if cols[0] in Annotations:
if 'name' in Annotations[cols[0]]:
del Annotations[cols[0]]['name']
if 'product' in Annotations[cols[0]]:
del Annotations[cols[0]]['product']
if cols[0] in Gene2ProdFinal:
del Gene2ProdFinal[cols[0]]
# grab some info from the annotation dictionary
IPRterms = []
NoteHeaders = []
for k, v in natsorted(Annotations.items()):
if 'note' in v:
for x in v['note']:
if ':' in x:
h = x.split(':', 1)[0]
if h.startswith('SMCOG'):
continue
if h not in NoteHeaders:
NoteHeaders.append(h)
elif 'db_xref' in v:
for y in v['db_xref']:
if y.startswith('InterPro'):
g = y.split(':', 1)[1]
if not g in IPRterms:
IPRterms.append(g)
NoteHeaders = natsorted(NoteHeaders)
# now parse tbl file and add annotations
if args.rename and '_' in args.rename:
args.rename = args.rename.split('_')[0]
lib.updateTBL(annotTBL, Annotations, TBLOUT, prefix=locusTagPrefix,
newtag=args.rename)
# if this is reannotation, then need to fix tbl file to track gene changes
if WGS_accession:
shutil.copyfile(os.path.join(outputdir, 'annotate_misc', 'tbl2asn', 'genome.tbl'),
os.path.join(outputdir, 'annotate_misc', 'tbl2asn', 'genome.tbl.bak'))
p2g = {}
# see if p2g file is present
p2gfile = None
if os.path.isfile(os.path.join(outputdir, 'update_results', 'ncbi.p2g')):
p2gfile = os.path.join(outputdir, 'update_results', 'ncbi.p2g')
else:
if args.p2g:
p2gfile = args.p2g
if p2gfile:
with open(p2gfile, 'r') as input:
for line in input:
cols = line.split('\t')
if not cols[0] in p2g:
p2g[cols[0]] = cols[1]
with open(os.path.join(outputdir, 'annotate_misc', 'tbl2asn', 'genome.tbl'), 'w') as outfile:
with open(os.path.join(outputdir, 'annotate_misc', 'tbl2asn', 'genome.tbl.bak'), 'r') as infile:
for line in infile:
line = line.replace('\n', '')
if line.startswith('\t\t\tprotein_id') or line.startswith('\t\t\ttranscript_id'):
ID = line.rsplit('|', 1)[-1].replace('_mrna', '')
type = 'prot'
if 'transcript_id' in line:
type = 'transcript'
if not ID in p2g:
if type == 'prot':
outfile.write(
'\t\t\tprotein_id\tgnl|%s|%s\n' % (WGS_accession, ID))
elif type == 'transcript':
outfile.write(
'\t\t\ttranscript_id\tgnl|%s|%s_mrna\n' % (WGS_accession, ID))
else:
p2gID = p2g.get(ID)
if type == 'prot':
outfile.write('\t\t\tprotein_id\tgnl|%s|%s|gb|%s\n' % (
WGS_accession, ID, p2gID))
elif type == 'transcript':
outfile.write(
'\t\t\ttranscript_id\tgnl|%s|%s_mrna\n' % (WGS_accession, ID))
else:
outfile.write('%s\n' % line)
else:
lib.log.error(
"Detected NCBI reannotation, but couldn't locate p2g file, please pass via --p2g")
shutil.copyfile(os.path.join(outputdir, 'annotate_misc', 'tbl2asn', 'genome.tbl.bak'),
os.path.join(outputdir, 'annotate_misc', 'tbl2asn', 'genome.tbl'))
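    # Hedged example of the protein_id rewrite above (IDs and accessions invented):
    #   in : \t\t\tprotein_id\tgnl|ncbi|FUN_000001
    #   out: \t\t\tprotein_id\tgnl|<WGS_accession>|FUN_000001|gb|<p2g accession>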
# launch tbl2asn to create genbank submission files
discrep = 'discrepency.report.txt'
lib.log.info("Converting to final Genbank format, good luck!")
if not version:
annot_version = 1
else:
annot_version = version
# have to run as subprocess because of multiprocessing issues
cmd = [sys.executable,
os.path.join(parentdir, 'aux_scripts', 'tbl2asn_parallel.py'),
'-i', TBLOUT,
'-f', os.path.join(outputdir, 'annotate_misc', 'tbl2asn', 'genome.fsa'),
'-o', os.path.join(outputdir, 'annotate_misc', 'tbl2asn'),
'--sbt', SBT, '-d', discrep,
'-s', organism, '-t', args.tbl2asn,
'-v', str(annot_version), '-c', str(args.cpus)]
if args.isolate:
cmd += ['--isolate', args.isolate]
if args.strain:
cmd += ['--strain', args.strain]
lib.log.debug(' '.join(cmd))
subprocess.call(cmd)
    # check if completed successfully
if not lib.checkannotations(os.path.join(outputdir, 'annotate_misc', 'tbl2asn', 'genome.gbf')):
        lib.log.error('GBK file conversion failed, tbl2asn parallel script has died')
sys.exit(1)
# parse discrepancy report to see which names/product descriptions failed/passed
# return dict containing tuples of (GeneName, GeneProduct, [reason])
BadProducts = []
if os.path.isfile(discrep) and os.path.exists(discrep):
BadProducts = lib.getFailedProductNames(discrep, Gene2ProdFinal)
Gene2ProductPassed = os.path.join(
outputdir, 'annotate_results', 'Gene2Products.new-names-passed.txt')
PassedCounts = 0
with open(Gene2ProductPassed, 'w') as prodpassed:
prodpassed.write('#Name\tPassed Description\n')
for key, value in natsorted(list(NotInCurated.items())):
if not key in BadProducts and not key in NeedCurating:
PassedCounts += 1
prodpassed.write('%s\t%s\n' % (key, value[0][1]))
Gene2ProductHelp = os.path.join(
outputdir, 'annotate_results', 'Gene2Products.need-curating.txt')
MustFixHelp = os.path.join(
outputdir, 'annotate_results', 'Gene2Products.must-fix.txt')
CurateCount = 0
MustFixCount = 0
with open(Gene2ProductHelp, 'w') as needhelp:
needhelp.write(
'#Name\tOriginal Description\tCleaned Description\tError-message\n')
for key, value in natsorted(list(NeedCurating.items())):
CurateCount += 1
needhelp.write('%s\t%s\t%s\tProduct defline failed funannotate checks\n' % (
key, value[0][0], value[0][1]))
with open(MustFixHelp, 'w') as musthelp:
musthelp.write('#GeneID\tName\tProduct Description\ttbl2asn Error\n')
if BadProducts:
for key, value in natsorted(list(BadProducts.items())):
MustFixCount += 1
musthelp.write('%s\t%s\t%s\t%s\n' %
(value[1], key, value[0], ', '.join(value[2])))
# collected output files and rename accordingly
ResultsFolder = os.path.join(outputdir, 'annotate_results')
if os.path.exists(discrep) and os.path.isfile(discrep):
shutil.copyfile(discrep, os.path.join(ResultsFolder,
organism_name+'.discrepency.report.txt'))
os.remove(discrep)
else:
        lib.log.error('no discrepancy file %s found' % discrep)
final_tbl = os.path.join(ResultsFolder, organism_name+'.tbl')
final_gbk = os.path.join(ResultsFolder, organism_name+'.gbk')
final_gff = os.path.join(ResultsFolder, organism_name+'.gff3')
final_proteins = os.path.join(ResultsFolder, organism_name+'.proteins.fa')
final_transcripts = os.path.join(
ResultsFolder, organism_name+'.mrna-transcripts.fa')
final_cds_transcripts = os.path.join(
ResultsFolder, organism_name+'.cds-transcripts.fa')
final_fasta = os.path.join(ResultsFolder, organism_name+'.scaffolds.fa')
final_annotation = os.path.join(
ResultsFolder, organism_name+'.annotations.txt')
final_stats = os.path.join(ResultsFolder, organism_name+'.stats.json')
shutil.copyfile(os.path.join(outputdir, 'annotate_misc',
'tbl2asn', 'genome.gbf'), final_gbk)
shutil.copyfile(os.path.join(outputdir, 'annotate_misc',
'tbl2asn', 'genome.tbl'), final_tbl)
# because of possible splitting tbl2asn output, loop through and get sqn and tbl parts
for file in os.listdir(os.path.join(outputdir, 'annotate_misc', 'tbl2asn')):
if file.endswith('.sqn') or file.endswith('.tbl'):
if 'genome.' in file:
updatedName = file.replace('genome', organism_name)
else:
updatedName = file.replace('genome', organism_name+'.part_')
shutil.copyfile(os.path.join(outputdir, 'annotate_misc',
'tbl2asn', file), os.path.join(ResultsFolder, updatedName))
lib.tbl2allout(final_tbl, Scaffolds, final_gff, final_proteins,
final_transcripts, final_cds_transcripts, final_fasta)
lib.annotation_summary(Scaffolds, final_stats, tbl=final_tbl,
previous=existingStats, database=FUNDB,
command=' '.join(sys.argv),
organism=organism_name)
# write AGP output so all files in correct directory
lib.log.info("Creating AGP file and corresponding contigs file")
    # no reason to use subprocess here; we should be able to import and run directly
#agp2fasta = os.path.join(parentdir, 'aux_scripts', 'fasta2agp.py')
agp_final = os.path.join(ResultsFolder, organism_name+'.agp')
agp_contigs = os.path.join(ResultsFolder, organism_name+'.contigs.fsa')
parse_scaffolds_makeagp(final_fasta, agp_final, agp_contigs)
#cmd = ['python', agp2fasta, organism_name+'.scaffolds.fa',AGP]
#lib.runSubprocess(cmd, ResultsFolder, lib.log)
# write secondary metabolite clusters output using the final genome in gbk format
if lib.checkannotations(antismash_input):
lib.log.info(
"Cross referencing SM cluster hits with MIBiG database version %s" % versDB.get('mibig'))
# do a blast best hit search against MIBiG database for cluster annotation, but looping through gene cluster hits
AllProts = []
SMgenes = []
for k, v in list(dictClusters.items()):
for i in v:
if '-T' in i:
ID = i.split('-T')[0]
else:
ID = i
if not i in AllProts:
AllProts.append(i)
if not ID in SMgenes:
SMgenes.append(ID)
AllProts = set(AllProts)
mibig_fasta = os.path.join(AntiSmashFolder, 'smcluster.proteins.fasta')
mibig_blast = os.path.join(
AntiSmashFolder, 'smcluster.MIBiG.blast.txt')
mibig_db = os.path.join(FUNDB, 'mibig.dmnd')
with open(mibig_fasta, 'w') as output:
with open(Proteins, 'r') as input:
SeqRecords = SeqIO.parse(Proteins, 'fasta')
for record in SeqRecords:
genename = record.id
if genename in AllProts:
SeqIO.write(record, output, 'fasta')
cmd = ['diamond', 'blastp', '--sensitive', '--query', mibig_fasta,
'--threads', str(args.cpus), '--out', mibig_blast,
'--db', mibig_db, '--max-hsps', '1',
'--evalue', '0.001', '--max-target-seqs', '1',
'--outfmt', '6']
lib.runSubprocess4(cmd, '.', lib.log)
# now parse blast results to get {qseqid: hit}
MIBiGBlast = {}
with open(mibig_blast, 'r') as input:
for line in input:
cols = line.split('\t')
if '-T' in cols[0]:
ID = cols[0].split('-T')[0]
else:
ID = cols[0]
hit = cols[1].split('|')
desc = hit[5]
cluster = hit[0]
db_ref = hit[6]
evalue = cols[10]
pident = cols[2]
result = (desc, cluster, db_ref, pident, evalue)
MIBiGBlast[ID] = result
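# e.g. (values illustrative) MIBiGBlast['FUN_000123'] ==
# ('nonribosomal peptide synthetase', 'BGC0000001', 'MIBiG:BGC0000001', '63.2', '1e-50'),
# i.e. a (desc, cluster, db_ref, pident, evalue) tuple keyed by gene ID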
lib.log.info("Creating tab-delimited SM cluster output")
# load in antismash cluster bed file to slice record
slicing = []
with open(AntiSmashBed, 'r') as antibed:
for line in antibed:
cols = line.split('\t')
# chr, cluster, start, stop in a tuple
cluster = (cols[0], cols[3], cols[1], cols[2])
slicing.append(cluster)
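# e.g. (values illustrative) slicing == [('scaffold_1', 'Cluster_1', '15000', '42000'), ...],
# i.e. (chr, cluster name, start, stop) as strings, converted to int below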
Offset = {}
# Get each cluster + 15 Kb in each direction to make sure you can see the context of the cluster
with open(os.path.join(ResultsFolder, organism_name+'.gbk'), 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == "source":
record_end = f.location.end
for slice in slicing:
if record.id == slice[0]:
sub_start = int(slice[2]) - 15000
sub_stop = int(slice[3]) + 15000
if sub_start < 1:
sub_start = 1
if sub_stop > record_end:
sub_stop = record_end
sub_record = record[sub_start:sub_stop]
# slicing a SeqRecord drops its annotations, so copy them back; newer
# Biopython requires e.g. molecule_type to write GenBank
sub_record.annotations = record.annotations
cluster_name = slice[1]
sub_record_name = os.path.join(
AntiSmashFolder, cluster_name+'.gbk')
Offset[cluster_name] = sub_start
with open(sub_record_name, 'w') as clusterout:
try:
SeqIO.write(sub_record, clusterout, 'genbank')
except ValueError:
print(slice)
print(sub_record.id)
print(sub_record.annotations)
sys.exit(1)
# okay, now loop through each cluster
for file in os.listdir(AntiSmashFolder):
if file.endswith('.gbk'):
base = file.replace('.gbk', '')
outputName = os.path.join(
AntiSmashFolder, base+'.secmet.cluster.txt')
file = os.path.join(AntiSmashFolder, file)
with open(outputName, 'w') as output:
output.write("#%s\n" % base)
output.write(
"#GeneID\tChromosome:start-stop\tStrand\tClusterPred\tBackbone Enzyme\tBackbone Domains\tProduct\tsmCOGs\tEggNog\tInterPro\tPFAM\tGO terms\tNotes\tMIBiG Blast\tProtein Seq\tDNA Seq\n")
with open(file, 'r') as input:
SeqRecords = SeqIO.parse(input, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == "CDS":
name = f.qualifiers["locus_tag"][0]
prot_seq = f.qualifiers['translation'][0]
start = f.location.nofuzzy_start
# Biopython locations are 0-based; +1 converts the start back to 1-based genome coordinates
actualStart = int(
start) + int(Offset.get(base)) + 1
end = f.location.nofuzzy_end
actualEnd = int(end) + \
int(Offset.get(base))
strand = f.location.strand
if strand == 1:
strand = '+'
DNA_seq = record.seq[start:end]
elif strand == -1:
strand = '-'
DNA_seq = record.seq[start:end].reverse_complement(
)
chr = record.id
product = f.qualifiers["product"][0]
# now get the info out of the note and db_xref fields, need to clear each field for each record
note = []
goTerms = []
pFAM = []
IPR = []
eggnogDesc = 'NA'
if name in SMgenes:
location = 'cluster'
else:
location = 'flanking'
cog = '.'
for k, v in list(f.qualifiers.items()):
if k == 'note':
# multiple notes are split with a semicolon
items = v[0].split('; ')
for i in items:
if i.startswith('EggNog:'):
eggnogID = i.replace(
'EggNog:', '')
eggnogDesc = EggNog.get(
eggnogID)
elif i.startswith('GO_'):
goterm = i.split(
': ', 1)[-1]
goTerms.append(goterm)
elif i.startswith('SMCOG'):
cog = i
else:
note.append(i)
if k == 'db_xref':
for i in v:
if i.startswith('InterPro:'):
r = i.replace(
'InterPro:', '')
IPR.append(r)
if i.startswith('PFAM:'):
p = i.replace('PFAM:', '')
pFAM.append(p)
if name in bbDomains:
domains = ";".join(bbDomains.get(name))
else:
domains = '.'
if name in bbSubType:
enzyme = bbSubType.get(name)
else:
if name in BackBone:
enzyme = BackBone.get(name)
else:
enzyme = '.'
if name in MIBiGBlast:
mibigTup = MIBiGBlast.get(name)
mibig = mibigTup[0]+' from '+mibigTup[1] + \
' ('+mibigTup[2]+':pident=' + \
mibigTup[3]+', evalue=' + \
mibigTup[4]+')'
mibig = str(mibig)
else:
mibig = '.'
if IPR:
IP = ";".join(IPR)
else:
IP = '.'
if pFAM:
PF = ";".join(pFAM)
else:
PF = '.'
if goTerms:
GO = ";".join(goTerms)
else:
GO = '.'
if note:
No = ";".join(note)
else:
No = '.'
output.write("%s\t%s:%i-%i\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (name, chr, actualStart,
actualEnd, strand, location, enzyme, domains, product, cog, eggnogDesc, IP, PF, GO, No, mibig, prot_seq, DNA_seq))
# now put together into a single file
finallist = []
ClustersOut = os.path.join(
ResultsFolder, organism_name+'.clusters.txt')
for file in os.listdir(AntiSmashFolder):
if file.endswith('secmet.cluster.txt'):
file = os.path.join(AntiSmashFolder, file)
finallist.append(file)
with open(ClustersOut, 'w') as output:
for file in natsorted(finallist):
with open(file, 'r') as input:
output.write(input.read())
output.write('\n\n')
# write tsv annotation table
lib.log.info("Writing genome annotation table.")
#lib.annotationtable(final_gbk, FUNDB, final_annotation)
INTERPRO = lib.iprTSV2dict(os.path.join(FUNDB, 'interpro.tsv'), IPRterms)
lib.annotationtable(final_gbk, FUNDB, NoteHeaders, INTERPRO,
final_annotation)
# final wrap up message
if MustFixCount == 0 and PassedCounts == 0 and CurateCount == 0:
lib.log.info("Funannotate annotate has completed successfully!")
else:
lib.log.info("Funannotate annotate has completed successfully!\n\n\
We need YOUR help to improve gene names/product descriptions:\n\
{:,} gene/product names MUST be fixed, see {:}\n\
{:,} gene/product names need to be curated, see {:}\n\
{:,} gene/product names passed but are not in Database, see {:}\n\n\
Please consider contributing a PR at https://github.com/nextgenusfs/gene2product\n".format(MustFixCount, MustFixHelp, CurateCount, Gene2ProductHelp, PassedCounts, Gene2ProductPassed))
if MustFixCount > 0: # show user how to update
lib.log.info("To fix gene names/product deflines, manually fix or can remove in {:}\n\n\
funannotate annotate -i {:} --fix fixed_file.txt --remove delete.txt\n".format(MustFixHelp, args.input))
print("-------------------------------------------------------")
# move logfile to logfiles directory
if os.path.isfile(log_name):
if not os.path.isdir(os.path.join(outputdir, 'logfiles')):
os.makedirs(os.path.join(outputdir, 'logfiles'))
shutil.copyfile(log_name, os.path.join(
outputdir, 'logfiles', 'funannotate-annotate.log'))
os.remove(log_name)
if __name__ == "__main__":
main(sys.argv[1:])
|
nextgenusfs/funannotate
|
funannotate/annotate.py
|
Python
|
bsd-2-clause
| 80,841
|
[
"BLAST",
"Biopython"
] |
437c83128a8b91e35b1bf1f665add971c45f8340f7dcd62a8f3620672f6e6e95
|
import os.path as osp
import pickle
import numpy as np
from mastic.system import AssociationType
from mastic.molecule import MoleculeTypeAtomSelection
from mastic.interactions.hydrogen_bond import HydrogenBondType
inputs_path = "../examples/sEH-TPPU"
# load the SystemType we will add associations to
system_type_pkl_path = osp.realpath(osp.join(inputs_path, "sEH_TPPU_SystemType.pkl"))
with open(system_type_pkl_path, 'rb') as rf:
sEH_TPPU_SystemType = pickle.load(rf)
# substantiate a crystal structure system so we can figure out where
# the binding site is
# load the coordinates for the members
member_coords = [np.load(osp.realpath(osp.join(inputs_path, 'TPPU_coords.npy'))),
np.load(osp.realpath(osp.join(inputs_path, 'sEH_coords.npy')))]
# substantiate the system
cryst_system = sEH_TPPU_SystemType.to_system(member_coords)
# find atoms in the binding site using a cutoff distance from the
# ligand
binding_site_cutoff_dist = 4  # in Angstroms
# find the atoms within this distance
binding_site_atoms = cryst_system.molecules[0].atoms_within_distance(
binding_site_cutoff_dist)
# get the indices of these atoms to define the AssociationType
binding_site_atom_idxs = [cryst_system.molecules[1].atoms.index(atom) for
atom in binding_site_atoms]
# you might also want to get the pdb serial numbers so you can
# visually check to see where these atoms are
binding_site_atom_serials = [atom.atom_type.pdb_serial_number for atom
in binding_site_atoms]
# the selection map tells the association the index of the member and
# the indices of the atoms to include as one component of the
# association. By selecting None as the indices, no selection will be
# made and the whole molecule will be a component
selection_map = [(1, binding_site_atom_idxs), (1, binding_site_atom_idxs)]
# The selection types correspond to the elements in the selection map
# and tell the AssociationType what kind of selection to make on the
# molecule. Setting one of them to None should mean the selection map
# also had no indices selected and it should use the whole system
# member. The MoleculeTypeAtomSelection allows for selection of atoms in a
# Molecule or MoleculeType.
selection_types = [MoleculeTypeAtomSelection, MoleculeTypeAtomSelection]
# make the actual association
sehBS_sehBS_assoc = AssociationType("sEHBS-sEHBS",
system_type=sEH_TPPU_SystemType,
selection_map=selection_map,
selection_types=selection_types
)
# make inxclasses from this
BS_only_inxclasses = HydrogenBondType.interaction_classes(sehBS_sehBS_assoc)
# for comparison make inxclasses for the whole protein-protein
selection_map = [(1, None), (1, None)]
# as above: None selections mean the whole system member is used as the component
selection_types = [None, None]
# make the actual association
seh_seh_assoc = AssociationType("sEH-sEH",
system_type=sEH_TPPU_SystemType,
selection_map=selection_map,
selection_types=selection_types
)
# make inxclasses from this
self_inxclasses = HydrogenBondType.interaction_classes(seh_seh_assoc)
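# quick sanity check (sketch): the binding-site-restricted association should
# yield far fewer hydrogen-bond interaction classes than the whole-protein
# self-association
print(len(BS_only_inxclasses), len(self_inxclasses))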
|
salotz/mast
|
prototypes/test_self_assoc.py
|
Python
|
mit
| 3,686
|
[
"CRYSTAL"
] |
a62c4ed8157128da310cbc1692532f0b2fa5b90a45d2272ee8d140bf57b0555f
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Calculation of overlap population analysis based on cclib data."""
import random
import numpy
from .calculationmethod import Method
def func(x):
"""Return the x-th triangular number x*(x+1)/2, i.e. the number of
unordered fragment pairs; used to size the progress bar."""
if x <= 1:
return max(x, 0)
else:
return x + func(x - 1)
class OPA(Method):
"""Overlap population analysis."""
def __init__(self, *args):
# Call the __init__ method of the superclass.
super(OPA, self).__init__(logname="OPA", *args)
def __str__(self):
"""Return a string representation of the object."""
return "OPA of" % (self.data)
def __repr__(self):
"""Return a representation of the object."""
return 'OPA("%s")' % (self.data)
def calculate(self, indices=None, fupdate=0.05):
"""Perform an overlap population analysis given the results of a parser"""
# Do we have the needed info in the ccData object?
if not hasattr(self.data, "mocoeffs") \
and not ( hasattr(self.data, "aooverlaps") \
or hasattr(self.data, "fooverlaps") ) \
and not hasattr(self.data, "nbasis"):
self.logger.error("Missing mocoeffs, aooverlaps/fooverlaps or nbasis")
return False #let the caller of function know we didn't finish
if not indices:
# Build list of groups of orbitals in each atom for atomresults.
if hasattr(self.data, "aonames"):
names = self.data.aonames
elif hasattr(self.data, "foonames"):
names = self.data.fonames
atoms = []
indices = []
name = names[0].split('_')[0]
atoms.append(name)
indices.append([0])
for i in range(1, len(names)):
name = names[i].split('_')[0]
try:
index = atoms.index(name)
except ValueError: #not found in atom list
atoms.append(name)
indices.append([i])
else:
indices[index].append(i)
# Determine number of steps, and whether process involves beta orbitals.
nfrag = len(indices) #nfrag
nstep = func(nfrag - 1)
unrestricted = (len(self.data.mocoeffs) == 2)
alpha = len(self.data.mocoeffs[0])
nbasis = self.data.nbasis
self.logger.info("Creating attribute results: array[4]")
results= [ numpy.zeros([nfrag, nfrag, alpha], "d") ]
if unrestricted:
beta = len(self.data.mocoeffs[1])
results.append(numpy.zeros([nfrag, nfrag, beta], "d"))
nstep *= 2
if hasattr(self.data, "aooverlaps"):
overlap = self.data.aooverlaps
elif hasattr(self.data,"fooverlaps"):
overlap = self.data.fooverlaps
# initialize progress if available
if self.progress:
self.progress.initialize(nstep)
size = len(self.data.mocoeffs[0])
step = 0
preresults = []
for spin in range(len(self.data.mocoeffs)):
two = numpy.array([2.0]*len(self.data.mocoeffs[spin]),"d")
# OP_{AB,i} = \sum_{a in A} \sum_{b in B} 2 c_{ai} c_{bi} S_{ab}
for A in range(len(indices)-1):
for B in range(A+1, len(indices)):
if self.progress: #usually only a handful of updates, so remove random part
self.progress.update(step, "Overlap Population Analysis")
for a in indices[A]:
ca = self.data.mocoeffs[spin][:,a]
for b in indices[B]:
cb = self.data.mocoeffs[spin][:,b]
temp = ca * cb * two * overlap[a,b]
results[spin][A,B] = numpy.add(results[spin][A,B],temp)
results[spin][B,A] = numpy.add(results[spin][B,A],temp)
step += 1
temparray2 = numpy.swapaxes(results[0],1,2)
self.results = [ numpy.swapaxes(temparray2,0,1) ]
if unrestricted:
temparray2 = numpy.swapaxes(results[1],1,2)
self.results.append(numpy.swapaxes(temparray2, 0, 1))
if self.progress:
self.progress.update(nstep, "Done")
return True
if __name__ == "__main__":
import doctest, opa
doctest.testmod(opa, verbose=False)
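# Minimal usage sketch (hypothetical file name; assumes cclib can parse it into
# a ccData object carrying mocoeffs, aooverlaps/fooverlaps and nbasis):
#
# from cclib.parser import ccopen
# data = ccopen("water_b3lyp.log").parse()
# opa = OPA(data)
# if opa.calculate():
# print(opa.results[0].shape) # (nmo, nfrag, nfrag) after the swapaxes above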
|
Schamnad/cclib
|
src/cclib/method/opa.py
|
Python
|
bsd-3-clause
| 4,773
|
[
"cclib"
] |
62e397596f968fef30b311a675c88d002b7fd8ccae06af25fc16d71c3f6d5eb7
|
import argparse
import pandas as pd
import re
from typing import List, Match, Dict, TextIO, Union
from datetime import date
# In this file, a "feature" refers to the collection of data between the > keys of the bprom output.
# That collection of data refers to one section of the DNA upstream of a gene
def read_bprom_file(bprom_file) -> List[str]:
"""Reads in file, creating a list of strings with each list element containing a line from the file"""
contents = []
with open(bprom_file) as file:
for line in file:
contents.append(line)
return contents
def concatenate_then_split(contents) -> List[str]:
"""Concatenates the file into one large string.
Then splits it on '>' so that each feature's data is together in one element"""
# Concatenates the entire file into one large string
concat_contents = ''.join(contents)
# Drop the leading '>' so the split below doesn't produce an empty first element
concat_contents = concat_contents[1:]
# Splits the file into a list of strings on ">"
features = concat_contents.split('>')
return features
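# e.g. (illustrative, reconstructed from the substrings matched below) one
# element of 'features' contains lines like:
# " Number of predicted promoters - 1"
# " Promoter Pos: 75 LDF- 4.01"
# " -10 box at pos. 60 TATAAT Score 54"
# " -35 box at pos. 37 TTGACA Score 40"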
def remove_promoterless_features(features) -> List[str]:
"""For each concatenated feature string passed, removes the element
if the # of predicted promoters is 0."""
cleaned_features = features
indices_to_delete = []
for i, feature in enumerate(cleaned_features):
if "Number of predicted promoters - 0" in cleaned_features[i]:
indices_to_delete.append(i)
# Must delete in reverse order, otherwise it changes the list indices after
# the element deleted, and you delete subsequent elements at i+1, i+2, i+3, etc
for i in sorted(indices_to_delete, reverse=True):
del cleaned_features[i]
return cleaned_features
def extract_accession(feature) -> str:
"""Extract accession"""
accession = re.search(r'[\w](.*)(?=_)', feature)
accession = accession.group().replace('_', '').strip()
return accession
def extract_test_seq_position(feature) -> List[str]:
"""Extract position in genome. Gets any number of values '(.*)' between the brackets
using 'lookbehind/lookright' (?<=PATTERN) and 'lookahead/lookleft' regex assertions
to extract (?<=Location=\\[)(.*)(?=]\\()"""
location = re.search('(?<=Location=\\[)(.*)(?=]\\()', feature)
location = location.group().split(':')
return location
def extract_strand_direction(feature) -> str:
"""Extract strand direction for a feature, - or +"""
# Matches the single character between parentheses, e.g. '(+)' or '(-)'
direction = re.search('(?<=\\().(?=\\))', feature)
direction = direction.group()
return direction
def extract_promoter_data(feature) -> Dict[str, str]:
"""Extracts all promoter data using regular expressions.
Use for one element in the output of concatenate_then_split()"""
# Extract promoter -10 and -35 sequences and scores
# Gets everything between "-xx box at pos." and " Score"
minus10 = re.search('(?<=-10 box at pos.)(.*)(?= Score)(.*)', feature)
minus35 = re.search('(?<=-35 box at pos.)(.*)(?= Score)(.*)', feature)
# Extracts the match and removes leading and trailing whitespace (which can be variable)
# (the bprom output does not maintain the same number of whitespace characters
# if there are fewer digits, at least for the scoring)
minus10 = minus10.group().lstrip().split(' ')
minus10_pos = int(minus10[0])
minus10_seq = minus10[1]
minus10_score = minus10[-1]
minus35 = minus35.group().lstrip().split(' ')
minus35_pos = int(minus35[0])
minus35_seq = minus35[1]
minus35_score = minus35[-1]
# Changing these keys changes the attribute names written to column 9
promoter_data = {
'minus10_pos': minus10_pos,
'minus10_seq': minus10_seq,
'minus10_score': minus10_score,
'minus35_pos': minus35_pos,
'minus35_seq': minus35_seq,
'minus35_score': minus35_score
}
return promoter_data
def convert_extracted_promoter_data_to_ID_column_format(
promoter_data,
calculated_promoter_positions) -> str:
"""Converts input data to the GFF3 ID column (column 9) format, a semicolon separated
list of values providing additional information about each feature"""
# Replaces the BPROM output positions with the calculated ones
minus_10_calculated = calculated_promoter_positions[2]
minus_35_calculated = calculated_promoter_positions[3]
promoter_data['minus10_pos'] = minus_10_calculated
promoter_data['minus35_pos'] = minus_35_calculated
# Creates the column 9 string (attributes)
promoter_data = ['{}={}'.format(key, value) for key, value in promoter_data.items()]
promoter_data = 'Description=Predicted promoter data;' + 'Note=' + ','.join(promoter_data) + ';'
return promoter_data
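# e.g. (values illustrative) the returned attributes string looks like:
# 'Description=Predicted promoter data;Note=minus10_pos=117,minus10_seq=TATAAT,minus10_score=0.54,minus35_pos=94,minus35_seq=TTGACA,minus35_score=0.40;'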
def extract_LDF_score(feature) -> str:
"""Extract LDF score"""
LDF = re.search('(?<=LDF-)(.*)', feature)
LDF = LDF.group().strip()
return LDF
def calculate_promoter_position(feature):
"""Calculate promoter positions (in the context of the genome) based on BPROM predictions."""
# Get 'Promoter Pos: X' data. This refers to the predicted transcriptional start site!
promoter_pos = re.search('(?<=Promoter Pos:)(.*)(?=LDF)', feature)
promoter_pos = int(promoter_pos.group().strip())
# Get start and end positions from 'Location=[XXX:YYYY]'
test_seq_position = extract_test_seq_position(feature)
test_cds_location_start_pos = int(test_seq_position[0])
test_cds_location_end_pos = int(test_seq_position[1])
promoter_data = extract_promoter_data(feature)
''' IMPORTANT!! Whether or not you add or subtract to calculate the promoter start
# position depends on whether we're on the + or - strand!
# The workflow Jolene uses is smart enough to correctly pull upstream
# for both + and - strands (i.e., pulls left for +, pulls right for -)
# THEREFORE, for a gene with a start at 930 on the + strand, it pulls 830:930
# And for a gene with a start at 930 on the - strand, it pulls 930:1030 '''
direction = extract_strand_direction(feature)
if direction == '+':
# BPROM starts counting from the LEFT boundary for + strand test sequences (as expected)
# Get -10 promoter position
minus10_pos = promoter_data['minus10_pos']
minus10_pos_in_context_of_genome = test_cds_location_start_pos + minus10_pos
# Get -35 promoter position
minus35_pos = promoter_data['minus35_pos']
minus35_pos_in_context_of_genome = test_cds_location_start_pos + minus35_pos
start = test_cds_location_start_pos + minus35_pos
end = test_cds_location_start_pos + promoter_pos
calculated_promoter_positions = [
start, end, minus10_pos_in_context_of_genome, minus35_pos_in_context_of_genome]
return calculated_promoter_positions
elif direction == '-':
# BPROM starts counting from the RIGHT boundary for - strand test sequences
# Get -10 promoter position
minus10_pos = promoter_data['minus10_pos']
minus10_pos_in_context_of_genome = test_cds_location_end_pos - minus10_pos
# Get -35 promoter position
minus35_pos = promoter_data['minus35_pos']
minus35_pos_in_context_of_genome = test_cds_location_end_pos - minus35_pos
# The start and end are reversed
end = test_cds_location_end_pos - minus35_pos
start = test_cds_location_end_pos - promoter_pos
calculated_promoter_positions = [
start, end, minus10_pos_in_context_of_genome, minus35_pos_in_context_of_genome]
return calculated_promoter_positions
else:
raise ValueError("Strand data neither '+' nor '-'")
def extract_tf_binding_elements():
"""Extract predicted transcription factor binding elements"""
return
def extract_data_for_all_features(features) -> List[List[Union[str, int]]]:
"""Loops through cleaned bprom output extracting all data of interest and builds the
structure for loading into a dataframe"""
extracted_data = []
for feature in features:
# loop through features, a List[str] containing each feature [str] in the
# original bprom format as a single string, but cleaned of irrelevant data
calculated_promoter_positions = calculate_promoter_position(feature)
promoter_data = extract_promoter_data(feature)
promoter_data_converted = convert_extracted_promoter_data_to_ID_column_format(
promoter_data, calculated_promoter_positions)
extracted_data.append(
[extract_accession(feature), # Seqid, col 1
'bprom', # Source, col 2
'promoter', # Type, col 3
calculated_promoter_positions[0], # Start, col 4
calculated_promoter_positions[1], # End, col 5
extract_LDF_score(feature), # Score, col 6
extract_strand_direction(feature), # Strand direction, col 7
'.', # Phase, col 8
promoter_data_converted, # Attributes, col 9
])
return extracted_data
def convert_to_dataframe(extracted_data) -> pd.DataFrame:
"""Convert extracted and processed data to Pandas dataframe with gff3 column names"""
df = pd.DataFrame(extracted_data,
columns=['seqid', 'source', 'type', 'start', 'end',
'score', 'strand', 'phase', 'attributes']
)
return df
def write_to_gff3(dataframe) -> None:
"""Create a gff3 text file from the DataFrame by converting to a tab separated values (tsv) file"""
tsv = dataframe.to_csv(sep='\t', index=False, header=None)
# Gets the first element of the first column (the accession), intended for the output filename
accession = dataframe.iloc[0][0]
year, month, day = date.today().year, date.today().month, date.today().day
#with open(f'{year}_{month}_{day}_bprom_as_gff3_{accession}.txt', 'w') as wf:
# Header so Galaxy can recognize as GFF3
print('##gff-version 3\n')
#for line in tsv:
print(tsv)
return
def convert_bprom_output_to_gff3(bprom_file) -> None:
"""Master function. Given a BPROM .txt file as output, extracts data and writes as a GFF3 file"""
bprom_file = read_bprom_file(bprom_file)
concatenated_bprom_file = concatenate_then_split(bprom_file)
working_file = remove_promoterless_features(concatenated_bprom_file)
extracted_data = extract_data_for_all_features(working_file)
gff3_dataframe = convert_to_dataframe(extracted_data)
# Create the gff3 text file
write_to_gff3(gff3_dataframe)
return
if __name__ == '__main__':
## Shows the DataFrame output in the terminal for testing/debugging
# bprom_file = read_bprom_file('BPROM_output.txt')
# concatenated_bprom_file: List[str] = concatenate_then_split(bprom_file)
# working_file = remove_promoterless_features(concatenated_bprom_file)
# print(convert_to_dataframe(extract_data_for_all_features(working_file)).to_string())
parser = argparse.ArgumentParser(
description='converts BPROM output to the gff3 file format')
parser.add_argument('-f', help='bprom file as .txt')
args = parser.parse_args()
# Actual function for converting the BPROM output to gff3
convert_bprom_output_to_gff3(args.f)
# Upload to cpt github in the directory Galaxy-Tools/tools/external/
|
TAMU-CPT/galaxy-tools
|
tools/external/bprom_gff3_converter.py
|
Python
|
gpl-3.0
| 11,702
|
[
"Galaxy"
] |
f262a48024ede8051ce736ab9251a9aeb0fb3df019b4c5d8f9fb635fc531d1cb
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2.services.answer_records import AnswerRecordsAsyncClient
from google.cloud.dialogflow_v2.services.answer_records import AnswerRecordsClient
from google.cloud.dialogflow_v2.services.answer_records import pagers
from google.cloud.dialogflow_v2.services.answer_records import transports
from google.cloud.dialogflow_v2.types import answer_record
from google.cloud.dialogflow_v2.types import answer_record as gcd_answer_record
from google.cloud.dialogflow_v2.types import participant
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
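# e.g. (sketch): if client.DEFAULT_ENDPOINT were "localhost:7469" the patched
# value becomes "foo.googleapis.com"; otherwise the real default
# "dialogflow.googleapis.com" is left untouched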
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert AnswerRecordsClient._get_default_mtls_endpoint(None) is None
assert (
AnswerRecordsClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
AnswerRecordsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
AnswerRecordsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
AnswerRecordsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
AnswerRecordsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
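# The rule under test: _get_default_mtls_endpoint inserts ".mtls" after the
# service name for *.googleapis.com hosts (sandbox hosts map to
# *.mtls.sandbox.googleapis.com) and leaves non-Google endpoints untouched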
@pytest.mark.parametrize(
"client_class", [AnswerRecordsClient, AnswerRecordsAsyncClient,]
)
def test_answer_records_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.AnswerRecordsGrpcTransport, "grpc"),
(transports.AnswerRecordsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_answer_records_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [AnswerRecordsClient, AnswerRecordsAsyncClient,]
)
def test_answer_records_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_answer_records_client_get_transport_class():
transport = AnswerRecordsClient.get_transport_class()
available_transports = [
transports.AnswerRecordsGrpcTransport,
]
assert transport in available_transports
transport = AnswerRecordsClient.get_transport_class("grpc")
assert transport == transports.AnswerRecordsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(AnswerRecordsClient, transports.AnswerRecordsGrpcTransport, "grpc"),
(
AnswerRecordsAsyncClient,
transports.AnswerRecordsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
AnswerRecordsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AnswerRecordsClient),
)
@mock.patch.object(
AnswerRecordsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AnswerRecordsAsyncClient),
)
def test_answer_records_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(AnswerRecordsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(AnswerRecordsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(AnswerRecordsClient, transports.AnswerRecordsGrpcTransport, "grpc", "true"),
(
AnswerRecordsAsyncClient,
transports.AnswerRecordsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(AnswerRecordsClient, transports.AnswerRecordsGrpcTransport, "grpc", "false"),
(
AnswerRecordsAsyncClient,
transports.AnswerRecordsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
AnswerRecordsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AnswerRecordsClient),
)
@mock.patch.object(
AnswerRecordsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AnswerRecordsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_answer_records_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class", [AnswerRecordsClient, AnswerRecordsAsyncClient]
)
@mock.patch.object(
AnswerRecordsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AnswerRecordsClient),
)
@mock.patch.object(
AnswerRecordsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AnswerRecordsAsyncClient),
)
def test_answer_records_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(AnswerRecordsClient, transports.AnswerRecordsGrpcTransport, "grpc"),
(
AnswerRecordsAsyncClient,
transports.AnswerRecordsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_answer_records_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
AnswerRecordsClient,
transports.AnswerRecordsGrpcTransport,
"grpc",
grpc_helpers,
),
(
AnswerRecordsAsyncClient,
transports.AnswerRecordsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_answer_records_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_answer_records_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflow_v2.services.answer_records.transports.AnswerRecordsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = AnswerRecordsClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
AnswerRecordsClient,
transports.AnswerRecordsGrpcTransport,
"grpc",
grpc_helpers,
),
(
AnswerRecordsAsyncClient,
transports.AnswerRecordsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_answer_records_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=None,
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [answer_record.ListAnswerRecordsRequest, dict,]
)
def test_list_answer_records(request_type, transport: str = "grpc"):
client = AnswerRecordsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_answer_records), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = answer_record.ListAnswerRecordsResponse(
next_page_token="next_page_token_value",
)
response = client.list_answer_records(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == answer_record.ListAnswerRecordsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAnswerRecordsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_answer_records_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AnswerRecordsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_answer_records), "__call__"
) as call:
client.list_answer_records()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == answer_record.ListAnswerRecordsRequest()
@pytest.mark.asyncio
async def test_list_answer_records_async(
transport: str = "grpc_asyncio", request_type=answer_record.ListAnswerRecordsRequest
):
client = AnswerRecordsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_answer_records), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
answer_record.ListAnswerRecordsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_answer_records(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == answer_record.ListAnswerRecordsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAnswerRecordsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_answer_records_async_from_dict():
await test_list_answer_records_async(request_type=dict)
def test_list_answer_records_field_headers():
client = AnswerRecordsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = answer_record.ListAnswerRecordsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_answer_records), "__call__"
) as call:
call.return_value = answer_record.ListAnswerRecordsResponse()
client.list_answer_records(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_answer_records_field_headers_async():
client = AnswerRecordsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = answer_record.ListAnswerRecordsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_answer_records), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
answer_record.ListAnswerRecordsResponse()
)
await client.list_answer_records(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_answer_records_flattened():
client = AnswerRecordsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_answer_records), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = answer_record.ListAnswerRecordsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_answer_records(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_answer_records_flattened_error():
client = AnswerRecordsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_answer_records(
answer_record.ListAnswerRecordsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_answer_records_flattened_async():
client = AnswerRecordsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_answer_records), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
answer_record.ListAnswerRecordsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_answer_records(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_answer_records_flattened_error_async():
client = AnswerRecordsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_answer_records(
answer_record.ListAnswerRecordsRequest(), parent="parent_value",
)
def test_list_answer_records_pager(transport_name: str = "grpc"):
client = AnswerRecordsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_answer_records), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
answer_record.ListAnswerRecordsResponse(
answer_records=[
answer_record.AnswerRecord(),
answer_record.AnswerRecord(),
answer_record.AnswerRecord(),
],
next_page_token="abc",
),
answer_record.ListAnswerRecordsResponse(
answer_records=[], next_page_token="def",
),
answer_record.ListAnswerRecordsResponse(
answer_records=[answer_record.AnswerRecord(),], next_page_token="ghi",
),
answer_record.ListAnswerRecordsResponse(
answer_records=[
answer_record.AnswerRecord(),
answer_record.AnswerRecord(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_answer_records(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, answer_record.AnswerRecord) for i in results)
def test_list_answer_records_pages(transport_name: str = "grpc"):
client = AnswerRecordsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_answer_records), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
answer_record.ListAnswerRecordsResponse(
answer_records=[
answer_record.AnswerRecord(),
answer_record.AnswerRecord(),
answer_record.AnswerRecord(),
],
next_page_token="abc",
),
answer_record.ListAnswerRecordsResponse(
answer_records=[], next_page_token="def",
),
answer_record.ListAnswerRecordsResponse(
answer_records=[answer_record.AnswerRecord(),], next_page_token="ghi",
),
answer_record.ListAnswerRecordsResponse(
answer_records=[
answer_record.AnswerRecord(),
answer_record.AnswerRecord(),
],
),
RuntimeError,
)
pages = list(client.list_answer_records(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_answer_records_async_pager():
client = AnswerRecordsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_answer_records),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
answer_record.ListAnswerRecordsResponse(
answer_records=[
answer_record.AnswerRecord(),
answer_record.AnswerRecord(),
answer_record.AnswerRecord(),
],
next_page_token="abc",
),
answer_record.ListAnswerRecordsResponse(
answer_records=[], next_page_token="def",
),
answer_record.ListAnswerRecordsResponse(
answer_records=[answer_record.AnswerRecord(),], next_page_token="ghi",
),
answer_record.ListAnswerRecordsResponse(
answer_records=[
answer_record.AnswerRecord(),
answer_record.AnswerRecord(),
],
),
RuntimeError,
)
async_pager = await client.list_answer_records(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, answer_record.AnswerRecord) for i in responses)
@pytest.mark.asyncio
async def test_list_answer_records_async_pages():
client = AnswerRecordsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_answer_records),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
answer_record.ListAnswerRecordsResponse(
answer_records=[
answer_record.AnswerRecord(),
answer_record.AnswerRecord(),
answer_record.AnswerRecord(),
],
next_page_token="abc",
),
answer_record.ListAnswerRecordsResponse(
answer_records=[], next_page_token="def",
),
answer_record.ListAnswerRecordsResponse(
answer_records=[answer_record.AnswerRecord(),], next_page_token="ghi",
),
answer_record.ListAnswerRecordsResponse(
answer_records=[
answer_record.AnswerRecord(),
answer_record.AnswerRecord(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_answer_records(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [gcd_answer_record.UpdateAnswerRecordRequest, dict,]
)
def test_update_answer_record(request_type, transport: str = "grpc"):
client = AnswerRecordsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_answer_record), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_answer_record.AnswerRecord(
name="name_value",
agent_assistant_record=gcd_answer_record.AgentAssistantRecord(
article_suggestion_answer=participant.ArticleAnswer(title="title_value")
),
)
response = client.update_answer_record(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_answer_record.UpdateAnswerRecordRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_answer_record.AnswerRecord)
assert response.name == "name_value"
def test_update_answer_record_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AnswerRecordsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_answer_record), "__call__"
) as call:
client.update_answer_record()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_answer_record.UpdateAnswerRecordRequest()
@pytest.mark.asyncio
async def test_update_answer_record_async(
transport: str = "grpc_asyncio",
request_type=gcd_answer_record.UpdateAnswerRecordRequest,
):
client = AnswerRecordsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_answer_record), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_answer_record.AnswerRecord(name="name_value",)
)
response = await client.update_answer_record(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_answer_record.UpdateAnswerRecordRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_answer_record.AnswerRecord)
assert response.name == "name_value"
@pytest.mark.asyncio
async def test_update_answer_record_async_from_dict():
await test_update_answer_record_async(request_type=dict)
def test_update_answer_record_field_headers():
client = AnswerRecordsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_answer_record.UpdateAnswerRecordRequest()
request.answer_record.name = "answer_record.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_answer_record), "__call__"
) as call:
call.return_value = gcd_answer_record.AnswerRecord()
client.update_answer_record(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"answer_record.name=answer_record.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_answer_record_field_headers_async():
client = AnswerRecordsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_answer_record.UpdateAnswerRecordRequest()
request.answer_record.name = "answer_record.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_answer_record), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_answer_record.AnswerRecord()
)
await client.update_answer_record(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"answer_record.name=answer_record.name/value",
) in kw["metadata"]
def test_update_answer_record_flattened():
client = AnswerRecordsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_answer_record), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_answer_record.AnswerRecord()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_answer_record(
answer_record=gcd_answer_record.AnswerRecord(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].answer_record
mock_val = gcd_answer_record.AnswerRecord(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_answer_record_flattened_error():
client = AnswerRecordsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_answer_record(
gcd_answer_record.UpdateAnswerRecordRequest(),
answer_record=gcd_answer_record.AnswerRecord(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_answer_record_flattened_async():
client = AnswerRecordsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_answer_record), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_answer_record.AnswerRecord()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_answer_record.AnswerRecord()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_answer_record(
answer_record=gcd_answer_record.AnswerRecord(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].answer_record
mock_val = gcd_answer_record.AnswerRecord(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_answer_record_flattened_error_async():
client = AnswerRecordsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_answer_record(
gcd_answer_record.UpdateAnswerRecordRequest(),
answer_record=gcd_answer_record.AnswerRecord(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.AnswerRecordsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AnswerRecordsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.AnswerRecordsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AnswerRecordsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.AnswerRecordsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = AnswerRecordsClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = AnswerRecordsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.AnswerRecordsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AnswerRecordsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.AnswerRecordsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = AnswerRecordsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.AnswerRecordsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.AnswerRecordsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.AnswerRecordsGrpcTransport,
transports.AnswerRecordsGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = AnswerRecordsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.AnswerRecordsGrpcTransport,)
def test_answer_records_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.AnswerRecordsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_answer_records_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflow_v2.services.answer_records.transports.AnswerRecordsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.AnswerRecordsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_answer_records",
"update_answer_record",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_answer_records_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2.services.answer_records.transports.AnswerRecordsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.AnswerRecordsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_answer_records_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflow_v2.services.answer_records.transports.AnswerRecordsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.AnswerRecordsTransport()
adc.assert_called_once()
def test_answer_records_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
AnswerRecordsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.AnswerRecordsGrpcTransport,
transports.AnswerRecordsGrpcAsyncIOTransport,
],
)
def test_answer_records_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.AnswerRecordsGrpcTransport, grpc_helpers),
(transports.AnswerRecordsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_answer_records_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.AnswerRecordsGrpcTransport,
transports.AnswerRecordsGrpcAsyncIOTransport,
],
)
def test_answer_records_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_answer_records_host_no_port():
client = AnswerRecordsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_answer_records_host_with_port():
client = AnswerRecordsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_answer_records_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.AnswerRecordsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_answer_records_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.AnswerRecordsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.AnswerRecordsGrpcTransport,
transports.AnswerRecordsGrpcAsyncIOTransport,
],
)
def test_answer_records_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.AnswerRecordsGrpcTransport,
transports.AnswerRecordsGrpcAsyncIOTransport,
],
)
def test_answer_records_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_answer_record_path():
project = "squid"
answer_record = "clam"
expected = "projects/{project}/answerRecords/{answer_record}".format(
project=project, answer_record=answer_record,
)
actual = AnswerRecordsClient.answer_record_path(project, answer_record)
assert expected == actual
def test_parse_answer_record_path():
expected = {
"project": "whelk",
"answer_record": "octopus",
}
path = AnswerRecordsClient.answer_record_path(**expected)
# Check that the path construction is reversible.
actual = AnswerRecordsClient.parse_answer_record_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = AnswerRecordsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = AnswerRecordsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = AnswerRecordsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder,)
actual = AnswerRecordsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = AnswerRecordsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = AnswerRecordsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization,)
actual = AnswerRecordsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = AnswerRecordsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = AnswerRecordsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project,)
actual = AnswerRecordsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = AnswerRecordsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = AnswerRecordsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = AnswerRecordsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = AnswerRecordsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = AnswerRecordsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.AnswerRecordsTransport, "_prep_wrapped_messages"
) as prep:
client = AnswerRecordsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.AnswerRecordsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = AnswerRecordsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = AnswerRecordsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = AnswerRecordsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = AnswerRecordsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(AnswerRecordsClient, transports.AnswerRecordsGrpcTransport),
(AnswerRecordsAsyncClient, transports.AnswerRecordsGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-dialogflow
|
tests/unit/gapic/dialogflow_v2/test_answer_records.py
|
Python
|
apache-2.0
| 72,052
|
[
"Octopus"
] |
bf63ca5788a6b0f1d241300274e690bc5a979508271de40c66848b853f252a8a
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Rsem(MakefilePackage):
"""RSEM is a software package for estimating gene and isoform expression
levels from RNA-Seq data."""
homepage = "http://deweylab.github.io/RSEM/"
url = "https://github.com/deweylab/RSEM/archive/v1.3.0.tar.gz"
version('1.3.0', '9728161625d339d022130e2428604bf5')
depends_on('r', type=('build', 'run'))
depends_on('perl', type=('build', 'run'))
depends_on('python', type=('build', 'run'))
depends_on('bowtie')
depends_on('bowtie2')
depends_on('star')
def install(self, spec, prefix):
make('install', 'DESTDIR=%s' % prefix, 'prefix=')
|
wscullin/spack
|
var/spack/repos/builtin/packages/rsem/package.py
|
Python
|
lgpl-2.1
| 1,886
|
[
"Bowtie"
] |
3db078e2c31ec33795af9ae8715eb96e6b3efc6d77573fe9a1dcfac70a3870d1
|
# In the game, Monopoly, the standard board is set up in the
# following way:
# GO A1 CC1 A2 T1 R1 B1 CH1 B2 B3 JAIL
# H2 C1
# T2 U1
# H1 C2
# CH3 C3
# R4 R2
# G3 D1
# CC3 CC2
# G2 D2
# G1 D3
# G2J F3 U2 F2 F1 R3 E3 E2 CH2 E1 FP
# A player starts on the GO square and adds the scores on two
# 6-sided dice to determine the number of squares they advance
# in a clockwise direction. Without any further rules we would
# expect to visit each square with equal probability: 2.5%.
# However, landing on G2J (Go To Jail), CC (community chest),
# and CH (chance) changes this distribution.
# In addition to G2J, and one card from each of CC and CH, that
# orders the player to go directly to jail, if a player rolls
# three consecutive doubles, they do not advance the result of
# their 3rd roll. Instead they proceed directly to jail.
# At the beginning of the game, the CC and CH cards are shuffled.
# When a player lands on CC or CH they take a card from the top
# of the respective pile and, after following the instructions,
# it is returned to the bottom of the pile. There are sixteen
# cards in each pile, but for the purpose of this problem we are
# only concerned with cards that order a movement; any instruction
# not concerned with movement will be ignored and the player will
# remain on the CC/CH square.
# * Community Chest (2/16 cards):
# 1. Advance to GO
# 2. Go to JAIL
# * Chance (10/16 cards):
# 1. Advance to GO
# 2. Go to JAIL
# 3. Go to C1
# 4. Go to E3
# 5. Go to H2
# 6. Go to R1
# 7. Go to next R (railway company)
# 8. Go to next R
# 9. Go to next U (utility company)
# 10. Go back 3 squares.
# The heart of this problem concerns the likelihood of visiting a
# particular square. That is, the probability of finishing at that
# square after a roll. For this reason it should be clear that, with
# the exception of G2J for which the probability of finishing on it
# is zero, the CH squares will have the lowest probabilities, as 5 / 8
# request a movement to another square, and it is the final square
# that the player finishes at on each roll that we are interested in.
# We shall make no distinction between "Just Visiting" and being sent
# to JAIL, and we shall also ignore the rule about requiring a double
# to "get out of jail", assuming that they pay to get out on their
# next turn.
# By starting at GO and numbering the squares sequentially from 00
# to 39 we can concatenate these two-digit numbers to produce strings
# that correspond with sets of squares.
# Statistically it can be shown that the three most popular squares,
# in order, are JAIL (6.24%) = Square 10, E3 (3.18%) = Square 24,
# and GO (3.09%) = Square 00. So these three most popular squares
# can be listed with the six-digit modal string: 102400.
# If, instead of using two 6-sided dice, two 4-sided dice are used,
# find the six-digit modal string.
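# e.g. squares [10, 24, 0] -> "10" + "24" + "00" -> "102400"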
from random import randrange
GO = 0
JAIL = 10
G2J = 30
CC1 = 2
CC2 = 17
CC3 = 33
CH1 = 7
CH2 = 22
CH3 = 36
C1 = 11
E3 = 24
H2 = 39
R1 = 5
U1 = 12
U2 = 28
dice_size = 4
iterations = 1000000
i = 0
double_dice = 0
pos = GO
matrix = [0] * 40
CCpos = 0
CHpos = 0
CCcards = [GO, JAIL]
CHcards = [GO, JAIL, C1, E3, H2, R1]
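# A hypothetical sanity check (not part of the original solution): the
# "advance to next railway" formula used below should map each chance
# square to the railway that follows it; railways sit on squares 5, 15,
# 25 and 35.
for ch, next_railway in ((CH1, 15), (CH2, 25), (CH3, 5)):
    assert ((ch + 5) // 10 * 10 + 5) % 40 == next_railway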
while i < iterations:
i += 1
dice1 = randrange(dice_size) + 1
dice2 = randrange(dice_size) + 1
double_dice = double_dice + 1 if dice1 == dice2 else 0
    if double_dice == 3:
        pos = JAIL
        double_dice = 0  # a trip to jail ends the run of consecutive doubles
        matrix[pos] += 1
        continue
pos = (pos + dice1 + dice2) % 40
if pos == G2J:
pos = JAIL
elif pos == CC1 or pos == CC2 or pos == CC3:
if CCpos < 2:
pos = CCcards[CCpos]
CCpos = (CCpos + 1) % 16
elif pos == CH1 or pos == CH2 or pos == CH3:
if CHpos < 6:
pos = CHcards[CHpos]
elif CHpos == 6 or CHpos == 7:
            pos = ((pos + 5) // 10 * 10 + 5) % 40  # advance to the next railway (squares 5, 15, 25, 35)
elif CHpos == 8:
if pos >= U1 and pos < U2:
pos = U2
else:
pos = U1
elif CHpos == 9:
pos -= 3
CHpos = (CHpos + 1) % 16
matrix[pos] += 1
tmp = {i: matrix[i] for i in range(40)}
ans = [y[0] for y in sorted(tmp.items(), key=lambda x: x[1], reverse=True)[:3]]
print ''.join([str(n).zfill(2) for n in ans])
|
cloudzfy/euler
|
src/84.py
|
Python
|
mit
| 4,488
|
[
"VisIt"
] |
a15a3badb796be244c5e4d550c4a94efb1a9156ec125c909fef4e5526e7560eb
|
tests=[
("python","UnitTestBuildComposite.py",{}),
("python","UnitTestScreenComposite.py",{}),
("python","UnitTestAnalyzeComposite.py",{}),
]
for dir in ['Cluster','Composite','Data','DecTree','Descriptors','FeatureSelect','InfoTheory','KNN','ModelPackage','NaiveBayes','Neural','SLT']:
tests.append(('python','test_list.py',{'dir':dir}))
longTests=[
]
if __name__=='__main__':
import sys
from rdkit import TestRunner
failed,tests = TestRunner.RunScript('test_list.py',0,1)
sys.exit(len(failed))
|
rdkit/rdkit-orig
|
rdkit/ML/test_list.py
|
Python
|
bsd-3-clause
| 523
|
[
"RDKit"
] |
6eb514ecca5ad02dfe45e3993258623c590e67fcd5b3604bde8d0d5c5d4bcd85
|
#!/usr/bin/env python
import sys
import pysam
import Script
def usage(what=None):
if what == "bedmode":
sys.stderr.write("""chromCoverage.py - Bedmode: report coverage in a set of regions
Usage: chromCoverage.py [-o F] -b B [-a] bamfile
Read regions from BED file B, and write to standard output (or to F if -o is specified)
a tab-delimited file with one row for each region and the following columns:
chrom start end nreads nbases avgcov
where nreads is the number of reads that (partially) overlap this region; nbases is the
sum of all bases from the reads overlapping this region; avgcov is the nbases divided by
the length of the region. If a read partially overlaps the region, it will be counted
fractionally: for example, if a 100bp read has an overlap of 40 with the region, it will
be counted as 0.4 reads.
If the -a option is specified, each row in the output file will consist of the entire
row from the original BED file followed by the three columns nreads, nbases, avgcov.
""")
else:
sys.stderr.write("""chromCoverage.py - Report per-chromosome coverage.
Usage: chromCoverage.py [-o F] [-m MIN] [-c CHROM] bamfile
chromCoverage.py [-o F] [-m MIN] [coveragefile]
chromCoverage.py [-o F] -b B [-a] bamfile
chromCoverage.py [-o F] -x coveragefiles...
Read coverage data from a BAM file or from a coveragefile (or standard input
if no filename is provided) and write by-chromosome coverage data to standard
output. The coveragefile should be in the format produced by the bamtools 'coverage'
command:
chrom position depth
The output file contains seven columns:
 chrom total length coverage efflen effperc effcov
total - sum of depth at all positions
length - observed chromosome length (highest position)
coverage - ratio between total and length
efflen - number of bases having depth >= MIN
effperc - percent of bases having depth >= MIN
effcov - ratio between total and efflen
Options:
-h, --help | Print this usage message.
-v | Print version number.
-o F | Write output to file F (default: stdout).
-m MIN | only positions with depth over MIN are considered when
computing 'total' and 'efflen' (default: {}).
-c CHROM | only output data for chromosome CHROM (default: all chromosomes).
-x | combine multiple output files into a single coverage file.
-b B | Annotate BED file B (see -h bedmode)
""".format(Cov.mincov))
P = Script.Script("chromCoverage.py", version="1.0", usage=usage)
class Cov():
chrom = ""
wanted = None
outfile = None
out = sys.stdout
mincov = 0
total = 0
maxpos = 0
effbases = 0
bedfile = None
allcols = False # If true, output all columns from BED file
def dump(self):
print("total={}, maxpos={}, effbases={}".format(self.total, self.maxpos, self.effbases))
def report(self):
if self.chrom != "":
if self.maxpos > 0 and self.effbases > 0:
self.out.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(self.chrom, self.total, self.maxpos, 1.0*self.total/self.maxpos,
self.effbases, round(100.0*self.effbases/self.maxpos, 1),
1.0*self.total/self.effbases))
else:
self.out.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(self.chrom, self.total, self.maxpos, 0, self.effbases, 0, 0))
self.total = 0
self.maxpos = 0
self.effbases = 0
def add(self, chrom, pos, cov, tot):
if chrom != self.chrom:
self.update(tot)
self.report()
# print("Started chrom {}".format(chrom))
self.chrom = chrom
self.maxpos = pos
self.effbases += 1
self.total += cov
def update(self, other):
other.maxpos += self.maxpos
other.effbases += self.effbases
other.total += self.total
def doCovFile(self, stream, Tot):
try:
self.out.write("#Chrom\tTotal\tLength\tCoverage\tEfflen\tEffperc\tEffcov\n")
while True:
line = stream.readline()
if not line:
break
parsed = line.split("\t")
if len(parsed) > 2:
chrom = parsed[0]
pos = int(parsed[1])
cov = int(parsed[2])
if cov > self.mincov:
self.add(chrom, pos, cov, Tot)
self.update(Tot)
self.report()
Tot.report()
finally:
stream.close()
def doBAMfile(self, filename, Tot):
bf = pysam.AlignmentFile(filename, "rb" )
self.out.write("#Chrom\tTotal\tLength\tCoverage\tEfflen\tEffperc\tEffcov\n")
try:
try:
for pileupcolumn in bf.pileup(self.wanted):
if pileupcolumn.n > self.mincov:
refname = bf.getrname(pileupcolumn.reference_id)
# self.add(pileupcolumn.reference_name, pileupcolumn.pos, pileupcolumn.n, Tot)
self.add(refname, pileupcolumn.pos, pileupcolumn.n, Tot)
except KeyboardInterrupt:
pass
except ValueError:
pass
self.update(Tot)
self.report()
Tot.report()
finally:
bf.close()
def doBEDfile(self, bamfile, bedfile):
bf = pysam.AlignmentFile(bamfile, "rb")
self.out.write("#Chrom\tStart\tEnd\tReads\tBases\tAvgCov\n")
try:
with open(bedfile, "r") as bed:
for bedline in bed:
if len(bedline) == 0:
continue
                    if bedline[0] == '#':  # skip header/comment lines
                        continue
parsed = bedline.rstrip("\r\n").split("\t")
if len(parsed) < 3:
continue
nreads = 0
nbases = 0
start = int(parsed[1])
end = int(parsed[2])
for read in bf.fetch(parsed[0], start, end):
if read.is_unmapped:
continue
ov = read.get_overlap(start, end)
readlen = read.query_length
nreads += (1.0 * ov) / readlen
nbases += ov
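                        # e.g. a 100 bp read with a 40 bp overlap contributes
                        # 0.4 reads and 40 bases to this region's totals.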
# print("s={}, e={}, l={}, ov={}, nr={}, nb={}".format(read.reference_start, read.reference_end, readlen, ov, nreads, nbases))
# raw_input()
if self.allcols:
self.out.write("\t".join(parsed) + "\t{:.2f}\t{}\t{:.2f}\n".format(nreads, nbases, (1.0 * nbases) / (end - start)))
else:
self.out.write("{}\t{}\t{}\t{:.2f}\t{}\t{:.2f}\n".format(parsed[0], start, end, nreads, nbases, (1.0 * nbases) / (end - start)))
finally:
bf.close()
def parseArgs(C, args):
next = ""
filenames = []
for a in args:
if next == '-m':
C.mincov = P.toInt(a)
next = ""
elif next == "-o":
C.outfile = a
next = ""
elif next == "-c":
C.wanted = a
next = ""
elif next == "-b":
C.bedfile = P.isFile(a)
next = ""
elif a in ['-m', "-o", "-c", "-b"]:
next = a
elif a == "-a":
C.allcols = True
else:
filenames.append(P.isFile(a))
return filenames
if __name__=="__main__":
C = Cov()
Tot = Cov()
Tot.chrom = "Total"
args = sys.argv[1:]
P.standardOpts(args)
filenames = parseArgs(C, args)
try:
if C.outfile:
C.out = open(C.outfile, "w")
Tot.out = C.out
if filenames == []:
C.doCovFile(sys.stdin, Tot)
elif filenames[0].endswith(".bam"):
if C.bedfile:
C.doBEDfile(filenames[0], C.bedfile)
else:
C.doBAMfile(filenames[0], Tot)
else:
C.doCovFile(open(filenames[0], "r"), Tot)
finally:
C.out.close()
|
albertoriva/bioscripts
|
chromCoverage.py
|
Python
|
gpl-3.0
| 8,364
|
[
"pysam"
] |
7f3b681bad410a90e4e95749aedefbfb9c3a2962b7b2e8b0cc040a12b2427d39
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4.QtCore import Qt
from camelot.view.controls.editors import MonthsEditor
from camelot.view.controls.delegates.customdelegate import CustomDelegate, DocumentationMetaclass
from camelot.core.utils import variant_to_pyobject, ugettext
from camelot.view.proxy import ValueLoading
class MonthsDelegate(CustomDelegate):
"""MonthsDelegate
custom delegate for showing and editing months and years
"""
editor = MonthsEditor
__metaclass__ = DocumentationMetaclass
def __init__(self, parent=None, forever=200*12, **kwargs):
"""
:param forever: number of months that will be indicated as Forever, set
        to None if not applicable
"""
super(MonthsDelegate, self).__init__(parent=parent, **kwargs)
self._forever = forever
def sizeHint(self, option, index):
q = MonthsEditor(None)
return q.sizeHint()
def paint(self, painter, option, index):
painter.save()
self.drawBackground(painter, option, index)
value = variant_to_pyobject( index.model().data( index, Qt.EditRole ) )
value_str = u''
        if self._forever is not None and value == self._forever:
value_str = ugettext('Forever')
elif value not in (None, ValueLoading):
years, months = divmod( value, 12 )
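            # e.g. value == 26 -> divmod(26, 12) == (2, 2) -> "2 years 2 months"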
if years:
value_str = value_str + ugettext('%i years ')%(years)
if months:
value_str = value_str + ugettext('%i months')%(months)
self.paint_text(painter, option, index, value_str)
painter.restore()
|
jeroendierckx/Camelot
|
camelot/view/controls/delegates/monthsdelegate.py
|
Python
|
gpl-2.0
| 2,687
|
[
"VisIt"
] |
480b711e7455a2bbb2ff6744489830589e25fbdf5ad8e9f0776eace26831678f
|
from urllib.request import urlopen
from behave import then, when
@when(u'I visit "{url}"')
def visit(context, url):
page = urlopen(context.base_url + url)
context.response = str(page.read())
@then(u'I should see "{text}"')
def i_should_see(context, text):
assert text in context.response
|
behave/behave-django
|
tests/acceptance/steps/live_test_server.py
|
Python
|
mit
| 305
|
[
"VisIt"
] |
1f842833b7c3e08af8e1829244074a99b500787d811a1ec92cd80a782c4499b3
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% raw %}{% url 'admin:index' %}{% endraw %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('{{ cookiecutter.project_slug }}.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
|
webyneter/cookiecutter-django
|
{{cookiecutter.project_slug}}/config/urls.py
|
Python
|
bsd-3-clause
| 1,592
|
[
"VisIt"
] |
ee77123014b1df95c5b3cdaa11ed35623592fc07f57b0d50fdb1a4fc47ec7b6f
|
"""A quick DOM implementation.
Python's xml.dom is very slow. The xml.sax module is also slow (as it imports urllib2).
This is our light-weight version.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from xml.parsers import expat
class Element(object):
"""An XML element.
@ivar uri: the element's namespace
@type uri: str
@ivar name: the element's localName
@type name: str
@ivar attrs: the element's attributes (key is in the form [namespace " "] localName)
@type attrs: {str: str}
@ivar childNodes: children
@type childNodes: [L{Element}]
@ivar content: the text content
@type content: str"""
__slots__ = ['uri', 'name', 'attrs', 'childNodes', 'content']
def __init__(self, uri, name, attrs):
self.uri = uri
self.name = name
self.attrs = attrs.copy()
self.content = None
self.childNodes = []
def __str__(self):
attrs = [n + '=' + self.attrs[n] for n in self.attrs]
start = '<{%s}%s %s' % (self.uri, self.name, ' '.join(attrs))
if self.childNodes:
return start + '>' + '\n'.join(map(str, self.childNodes)) + ('</%s>' % (self.name))
elif self.content:
return start + '>' + self.content + ('</%s>' % (self.name))
else:
return start + '/>'
def getAttribute(self, name):
return self.attrs.get(name, None)
def toDOM(self, doc, prefixes):
"""Create a DOM Element for this qdom.Element.
		@param doc: document to use to create the element
		@param prefixes: L{Prefixes} helper used to assign namespace prefixes
		@return: the new element
		"""
elem = prefixes.createElementNS(doc, self.uri, self.name)
for fullname, value in self.attrs.iteritems():
if ' ' in fullname:
ns, localName = fullname.split(' ', 1)
else:
ns, localName = None, fullname
prefixes.setAttributeNS(elem, ns, localName, value)
for child in self.childNodes:
elem.appendChild(child.toDOM(doc, prefixes))
if self.content:
elem.appendChild(doc.createTextNode(self.content))
return elem
class QSAXhandler:
"""SAXHandler that builds a tree of L{Element}s"""
def __init__(self):
self.stack = []
def startElementNS(self, fullname, attrs):
split = fullname.split(' ', 1)
if len(split) == 2:
self.stack.append(Element(split[0], split[1], attrs))
else:
self.stack.append(Element(None, fullname, attrs))
self.contents = ''
def characters(self, data):
self.contents += data
def endElementNS(self, name):
contents = self.contents.strip()
self.stack[-1].content = contents
self.contents = ''
new = self.stack.pop()
if self.stack:
self.stack[-1].childNodes.append(new)
else:
self.doc = new
def parse(source):
"""Parse an XML stream into a tree of L{Element}s.
@param source: data to parse
@type source: file
@return: the root
@rtype: L{Element}"""
handler = QSAXhandler()
parser = expat.ParserCreate(namespace_separator = ' ')
parser.StartElementHandler = handler.startElementNS
parser.EndElementHandler = handler.endElementNS
parser.CharacterDataHandler = handler.characters
parser.ParseFile(source)
return handler.doc
class Prefixes:
"""Keep track of namespace prefixes. Used when serialising a document.
@since: 0.54
"""
def __init__(self, default_ns):
self.prefixes = {}
self.default_ns = default_ns
def get(self, ns):
prefix = self.prefixes.get(ns, None)
if prefix:
return prefix
prefix = 'ns%d' % len(self.prefixes)
self.prefixes[ns] = prefix
return prefix
def setAttributeNS(self, elem, uri, localName, value):
if uri is None:
elem.setAttributeNS(None, localName, value)
else:
elem.setAttributeNS(uri, self.get(uri) + ':' + localName, value)
def createElementNS(self, doc, uri, localName):
if uri == self.default_ns:
return doc.createElementNS(uri, localName)
else:
return doc.createElementNS(uri, self.get(uri) + ':' + localName)
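if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module). expat's
    # ParseFile wants a read()-able object; on Python 2, which this module
    # targets, an in-memory StringIO suffices.
    from StringIO import StringIO
    root = parse(StringIO('<a xmlns="urn:x"><b n="1">hi</b></a>'))
    print root.name                      # 'a'
    print root.childNodes[0].content     # 'hi'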
|
timdiels/zeroinstall
|
zeroinstall/injector/qdom.py
|
Python
|
lgpl-2.1
| 3,777
|
[
"VisIt"
] |
7f4689596d68dc9cef692cd141d0b0ffad1bda32a34e481463b161522b96e12b
|
import os
from itertools import count
from pyjade import Parser, Compiler as _Compiler
from pyjade.runtime import attrs
from pyjade.utils import process
def process_param(key, value, terse=False):
if terse:
if (key == value) or (value is True):
return key
if isinstance(value, basestring):
value = value.decode('utf8')
return '''%s="%s"''' % (key, value)
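# Hypothetical examples of the behaviour above:
#   process_param('checked', True, terse=True)   -> 'checked'
#   process_param('class', 'btn')                -> 'class="btn"'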
class Compiler(_Compiler):
def __init__(self, *args, **kws):
_Compiler.__init__(self, *args, **kws)
self._i = count()
def visitAssignment(self,assignment):
self.buffer('<%% var %s = %s; %%>'%(assignment.name,assignment.val))
def visitCode(self,code):
if code.buffer:
val = code.val.lstrip()
self.buf.append('<%%%s %s %%>'%('=' if code.escape else '-', val))
else:
self.buf.append('<%% %s'%code.val) #for loop
if code.block:
self.buf.append(' { %>') #for loop
# if not code.buffer: self.buf.append('{')
self.visit(code.block)
# if not code.buffer: self.buf.append('}')
if not code.buffer:
codeTag = code.val.strip().split(' ',1)[0]
if codeTag in self.autocloseCode:
self.buf.append('<% } %>')
elif not code.buffer:
self.buf.append('; %>') #for loop
def visitEach(self,each):
#self.buf.append('{%% for %s in %s %%}'%(','.join(each.keys),each.obj))
__i = self._i.next()
self.buf.append('<%% for (_i_%s = 0, _len_%s = %s.length; _i_%s < _len_%s; _i_%s++) { ' %(__i, __i, each.obj, __i, __i, __i))
if len(each.keys) > 1:
for i, k in enumerate(each.keys):
self.buf.append('%s = %s[_i_%s][%s];' % (k, each.obj, __i, i))
else:
for k in each.keys:
self.buf.append('%s = %s[_i_%s];' % (k, each.obj, __i))
self.buf.append(' %>')
self.visit(each.block)
self.buf.append('<% } %>')
def _do_eval(self, value):
if isinstance(value, basestring):
value = value.encode('utf-8')
try:
value = eval(value, {}, {})
except:
return "<%%= %s %%>" % value
return value
def _get_value(self, attr):
value = attr['val']
if attr['static']:
return attr['val']
if isinstance(value, basestring):
return self._do_eval(value)
else:
return attr['name']
def visitAttributes(self,attrs):
classes = []
params = []
for attr in attrs:
if attr['name'] == 'class':
value = self._get_value(attr)
if isinstance(value, list):
classes.extend(value)
else:
classes.append(value)
else:
value = self._get_value(attr)
if (value is not None) and (value is not False):
params.append((attr['name'], value))
if classes:
classes = [unicode(c) for c in classes]
params.append(('class', " ".join(classes)))
if params:
self.buf.append(" "+" ".join([process_param(k, v, self.terse) for (k,v) in params]))
def visitConditional(self,conditional):
TYPE_CODE = {
'if': lambda x: 'if (%s)'%x,
'unless': lambda x: 'if (!%s)'%x,
'elif': lambda x: '} else if (%s)'%x,
'else': lambda x: '} else'
}
self.buf.append('\n<%% %s { %%>'%TYPE_CODE[conditional.type](conditional.sentence))
if conditional.block:
self.visit(conditional.block)
for next in conditional.next:
self.visitConditional(next)
if conditional.type in ['if','unless']: self.buf.append('\n<% } %>\n')
def interpolate(self,text):
return self._interpolate(text,lambda x:'<%%= %s %%>'%x)
|
glennyonemitsu/MarkupHiveServer
|
src/pyjade/ext/underscore.py
|
Python
|
mit
| 4,076
|
[
"VisIt"
] |
75b905e92bcb5f32aeeb6f4a794baafde74585dbaaf64a099c360c891e976dad
|
"""OpenMM molecular dynamics runner with accessory classes.
OpenMM is a library with support for running molecular dynamics
simulations with specific support for fast GPU calculations. The
component based architecture of OpenMM makes it a perfect fit with
wepy.
In addition to the principle OpenMMRunner class there are a few
classes here that make using OpenMM runner more efficient.
First is a WalkerState class (OpenMMState) that wraps the OpenMM state
object directly, which is itself a wrapper around the C++ data
structures. This gives better performance by avoiding copies into a
WalkerState dictionary.
Second is the OpenMMWalker, which is identical to the Walker class
except that it enforces that the state is an actual instantiation of
OpenMMState. Use of this is optional.
Finally, there is the OpenMMGPUWorker class. This is to be used as the
worker type for the WorkerMapper work mapper. It is necessary in order
to pass OpenMM the device index of the GPU device to use.
"""
from copy import copy
import random as rand
from warnings import warn
import logging
import time
from eliot import log_call, start_action
import numpy as np
try:
import simtk.openmm.app as omma
import simtk.openmm as omm
import simtk.unit as unit
except ModuleNotFoundError:
raise ModuleNotFoundError("OpenMM has not been installed, which this runner requires.")
from wepy.walker import Walker, WalkerState
from wepy.runners.runner import Runner
from wepy.work_mapper.worker import Worker
from wepy.work_mapper.task_mapper import WalkerTaskProcess
from wepy.reporter.reporter import Reporter
from wepy.util.util import box_vectors_to_lengths_angles
## Constants
KEYS = ('positions', 'velocities', 'forces', 'kinetic_energy',
'potential_energy', 'time', 'box_vectors', 'box_volume',
'parameters', 'parameter_derivatives')
"""Names of the fields of the OpenMMState."""
# when we use the get_state function from the simulation context we
# can pass options for what kind of data to get, this is the default
# to get all the data. TODO not really sure what the 'groups' keyword
# is for though
GET_STATE_KWARG_DEFAULTS = (('getPositions', True),
('getVelocities', True),
('getForces', True),
('getEnergy', True),
('getParameters', True),
('getParameterDerivatives', True),
('enforcePeriodicBox', True),)
"""Mapping of key word arguments to the simulation.context.getState
method for retrieving data for a simulation state. By default we set
each as True to retrieve all information. The presence or absence of
them is handled by the OpenMMState.
"""
# the Units objects that OpenMM uses internally and are returned from
# simulation data
# TODO: this is never used and we only need the unit names. It's okay
# to use simtk.units here, but other runners should use a unit system
# like pint, which is easier to install. So we should remove this since
# it's not used.
# UNITS = (('positions_unit', unit.nanometer),
# ('time_unit', unit.picosecond),
# ('box_vectors_unit', unit.nanometer),
# ('velocities_unit', unit.nanometer/unit.picosecond),
# ('forces_unit', unit.kilojoule / (unit.nanometer * unit.mole)),
# ('box_volume_unit', unit.nanometer),
# ('kinetic_energy_unit', unit.kilojoule / unit.mole),
# ('potential_energy_unit', unit.kilojoule / unit.mole),
# )
# """Mapping of units identifiers to the corresponding simtk.units Unit objects."""
# the names of the units from the units objects above. This is used
# for saving them to files
UNIT_NAMES = (('positions_unit', unit.nanometer.get_name()),
('time_unit', unit.picosecond.get_name()),
('box_vectors_unit', unit.nanometer.get_name()),
('velocities_unit', (unit.nanometer/unit.picosecond).get_name()),
('forces_unit', (unit.kilojoule / (unit.nanometer * unit.mole)).get_name()),
('box_volume_unit', unit.nanometer.get_name()),
('kinetic_energy_unit', (unit.kilojoule / unit.mole).get_name()),
('potential_energy_unit', (unit.kilojoule / unit.mole).get_name()),
)
"""Mapping of unit identifier strings to the serialized string spec of the unit."""
# a random seed will be chosen from 1 to RAND_SEED_RANGE_MAX when the
# Langevin integrator is created. 0 is the default and special value
# which will then choose a random value when the integrator is created
# TODO: test this isn't needed
# RAND_SEED_RANGE_MAX = 1000000
# the runner for the simulation which runs the actual dynamics
class OpenMMRunner(Runner):
"""Runner for OpenMM simulations."""
def __init__(self, system, topology, integrator,
platform=None,
platform_kwargs=None,
enforce_box=False):
"""Constructor for OpenMMRunner.
Parameters
----------
system : simtk.openmm.System object
The system (forcefields) for the simulation.
topology : simtk.openmm.app.Topology object
The topology for you system.
integrator : subclass simtk.openmm.Integrator object
Integrator for propagating dynamics.
platform : str
The specification for the default computational platform
to use. Platform can also be set when run_segment is
called. If None uses OpenMM default platform, see OpenMM
            documentation for all values, but typical ones are:
Reference, CUDA, OpenCL. If value is None the automatic
platform determining mechanism in OpenMM will be used.
platform_kwargs : dict of str : bool, optional
key-values to set for a platform with
platform.setPropertyDefaultValue as the default for this
runner.
enforce_box : bool
Calls 'context.getState' with 'enforcePeriodicBox' if True.
(Default value = False)
Warnings
--------
Regarding the 'enforce_box' option.
When retrieving states from an OpenMM simulation Context, you
have the option to enforce periodic boundary conditions in the
resulting atomic positions in a topology aware way that
doesn't break bonds through boundaries. This is convenient for
post-processing as this can be a complex task and is not
readily exposed in the OpenMM API as a standalone function.
However, in some types of simulations (implicit solvent ones,
for example) the periodic box vectors are ignored, despite
there being no option to disable periodic boundaries in the
context itself. If you are running such a simulation you will
likely pay no attention to the box vectors at all, and the
arbitrary defaults that exist will be very wrong; this
incorrectness will not show up in a non-wepy OpenMM simulation
unless you handle the context states yourself. If you then run
in wepy with 'enforce_box' set to True, the box will be
enforced and confusingly wrong results will follow whose root
cause is difficult to track down.
"""
assert platform is None or isinstance(platform, str), \
    f"platform should be a string or None, not {type(platform)}"
# we save the different components. However, if we are to make
# this runner picklable we have to convert the SWIG objects to
# a picklable form
self.system = system
self.integrator = integrator
# these are not SWIG objects
self.topology = topology
self.platform_name = platform
self.platform_kwargs = platform_kwargs
self.enforce_box = enforce_box
self.getState_kwargs = dict(GET_STATE_KWARG_DEFAULTS)
# update with the user based enforce_box
self.getState_kwargs['enforcePeriodicBox'] = self.enforce_box
self._cycle_platform = None
self._cycle_platform_kwargs = None
# for special monitoring purposes to get split times to debug
# performance
self._last_cycle_segments_split_times = []
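# A minimal construction sketch (assumes `my_system`, `my_topology`, and
# `my_integrator` were already built with OpenMM; those names are
# hypothetical):
#
#   runner = OpenMMRunner(my_system, my_topology, my_integrator,
#                         platform='CUDA',
#                         platform_kwargs={'DeviceIndex': '0'})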
@log_call(include_args=[
'platform',
'platform_kwargs',
],
include_result=False)
def pre_cycle(self,
platform=None,
platform_kwargs=None,
**kwargs
):
# choose to use the platform spec in this function call or to
# use the default one saved in the runner
# if the platform is given locally use this one
if platform is not None:
logging.info(f"Setting the platform ({platform}) in the 'pre_cycle' OpenMM Runner call"
f"with platform kwargs: {platform_kwargs}"
)
# set the platform and kwargs for this cycle
self._cycle_platform = platform
self._cycle_platform_kwargs = platform_kwargs
# otherwise we just don't set this and let resolution of
# platform happen at run segment.
super().pre_cycle(**kwargs)
# each segment split times will get appended to this
self._last_cycle_segments_split_times = []
@log_call(include_args=[],
include_result=False)
def post_cycle(self, **kwargs):
super().post_cycle(**kwargs)
# remove the platform and kwargs for this cycle
self._cycle_platform = None
self._cycle_platform_kwargs = None
def _resolve_platform(self,
platform,
platform_kwargs,
):
# resolve which platform to use
# force usage of environmental one
if platform is Ellipsis:
platform_name = None
platform_kwargs = None
# use the runtime given one
elif platform is not None:
platform_name = platform
platform_kwargs = platform_kwargs
# if the pre_cycle configured platform is set use this over
# the default
elif self._cycle_platform is not None:
platform_name = self._cycle_platform
platform_kwargs = self._cycle_platform_kwargs
# use the default one
elif self.platform_name is not None:
platform_name = self.platform_name
platform_kwargs = self.platform_kwargs
# if the default is not set fall back to the environmental one
else:
platform_name = None
platform_kwargs = None
return platform_name, platform_kwargs
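# Resolution precedence sketch: a `run_segment` argument wins over the
# `pre_cycle` setting, which wins over the runner default, which falls
# back to OpenMM's environmental default; Ellipsis forces the
# environmental default. E.g. (hypothetical values):
#
#   runner = OpenMMRunner(system, topology, integrator, platform='CUDA')
#   runner._resolve_platform(None, None)      # -> ('CUDA', None)
#   runner._resolve_platform('OpenCL', None)  # -> ('OpenCL', None)
#   runner._resolve_platform(Ellipsis, None)  # -> (None, None)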
@log_call(
include_args=[
'segment_length',
'getState_kwargs',
'platform',
'platform_kwargs',
],
include_result=False,
)
def run_segment(self,
walker,
segment_length,
getState_kwargs=None,
platform=None,
platform_kwargs=None,
**kwargs):
"""Run dynamics for the walker.
Parameters
----------
walker : object implementing the Walker interface
The walker for which dynamics will be propagated.
segment_length : int or float
The numerical value that specifies how much dynamics are to be run.
getState_kwargs : dict of str : bool, optional
Specify the key-word arguments to pass to
simulation.context.getState when getting simulation
states. If None, the defaults stored on the runner object
are used.
platform : str or None or Ellipsis
The specification for the computational platform to
use. If None, the default for the runner is used (falling
back to the OpenMM environmental default if that is also
unset) and platform_kwargs is ignored. If Ellipsis, the
OpenMM default or environmentally defined platform is
forced. See the OpenMM documentation for all values;
typical ones are: Reference, CUDA, OpenCL.
platform_kwargs : dict of str : bool, optional
key-values to set for a platform with
platform.setPropertyDefaultValue for this segment only.
Returns
-------
new_walker : object implementing the Walker interface
Walker after dynamics was run, only the state should be modified.
"""
run_segment_start = time.time()
# set the kwargs that will be passed to getState
tmp_getState_kwargs = getState_kwargs
logging.info("Default 'getState_kwargs' in runner: "
f"{self.getState_kwargs}")
logging.info("'getState_kwargs' passed to 'run_segment' : "
f"{getState_kwargs}")
# start with the object value
getState_kwargs = copy(self.getState_kwargs)
if tmp_getState_kwargs is not None:
getState_kwargs.update(tmp_getState_kwargs)
logging.info("After resolving 'getState_kwargs' that will be used are: "
f"{getState_kwargs}")
gen_sim_start = time.time()
# make a copy of the integrator for this particular segment
new_integrator = copy(self.integrator)
# force setting of the random seed to 0, a special value that
# makes the integrator choose a new random seed when it is
# created
new_integrator.setRandomNumberSeed(0)
## Platform
logging.info("Default 'platform' in runner: "
f"{self.platform_name}")
logging.info("pre_cycle set 'platform' in runner: "
f"{self._cycle_platform}")
logging.info("'platform' passed to 'run_segment' : "
f"{platform}")
logging.info("Default 'platform_kwargs' in runner: "
f"{self.platform_kwargs}")
logging.info("pre_cycle set 'platform_kwargs' in runner: "
f"{self._cycle_platform_kwargs}")
logging.info("'platform_kwargs' passed to 'run_segment' : "
f"{platform_kwargs}")
platform_name, platform_kwargs = self._resolve_platform(
platform, platform_kwargs
)
logging.info("Resolved 'platform' : "
f"{platform_name}")
logging.info("Resolved 'platform_kwargs' : "
f"{platform_kwargs}")
# create simulation object
## create the platform and customize
# if a platform was given we use it to make a Simulation object
if platform_name is not None:
logging.info("Using platform configured in code.")
# get the platform by its name to use
platform = omm.Platform.getPlatformByName(platform_name)
logging.info(f"Platform object created: {platform}")
if platform_kwargs is None:
platform_kwargs = {}
# set properties from the kwargs if they apply to the platform
for key, value in platform_kwargs.items():
if key in platform.getPropertyNames():
logging.info(f"Setting platform property: {key} : {value}")
platform.setPropertyDefaultValue(key, value)
else:
warn(f"Platform kwargs given ({key} : {value}) "
f"but is not valid for this platform ({platform_name})")
# make a new simulation object
simulation = omma.Simulation(self.topology, self.system,
new_integrator, platform)
# otherwise just use the default or environmentally defined one
else:
logging.info("Using environmental platform.")
simulation = omma.Simulation(self.topology, self.system,
new_integrator)
# set the state to the context from the walker
simulation.context.setState(walker.state.sim_state)
gen_sim_end = time.time()
gen_sim_time = gen_sim_end - gen_sim_start
logging.info("Time to generate the system: {}".format(gen_sim_time))
# actually run the simulation
steps_start = time.time()
# Run the simulation segment for the number of time steps
with start_action(action_type="OpenMM Simulation.steps"):
simulation.step(segment_length)
steps_end = time.time()
steps_time = steps_end - steps_start
logging.info("Time to run {} sim steps: {}".format(segment_length, steps_time))
# generate the new state/walker, timing the state retrieval
get_state_start = time.time()
new_state = self.generate_state(simulation, segment_length,
                                walker, getState_kwargs)
get_state_end = time.time()
get_state_time = get_state_end - get_state_start
logging.info("Getting context state time: {}".format(get_state_time))
# create a new walker for this
new_walker = OpenMMWalker(new_state, walker.weight)
run_segment_end = time.time()
run_segment_time = run_segment_end - run_segment_start
logging.info("Total internal run_segment time: {}".format(run_segment_time))
segment_split_times = {
'gen_sim_time' : gen_sim_time,
'steps_time' : steps_time,
'get_state_time' : get_state_time,
'run_segment_time' : run_segment_time,
}
self._last_cycle_segments_split_times.append(segment_split_times)
return new_walker
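# Usage sketch (the `runner` and `walker` names are hypothetical): run
# 1000 integrator steps on a specific device for this segment only,
#
#   new_walker = runner.run_segment(walker, 1000,
#                                   platform='CUDA',
#                                   platform_kwargs={'DeviceIndex': '1'})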
@log_call(include_args=['getState_kwargs'],
include_result=False)
def generate_state(self, simulation, segment_length, starting_walker, getState_kwargs):
"""Method for generating a wepy compliant state from an OpenMM
simulation object and data about the last segment of dynamics run.
Parameters
----------
simulation : simtk.openmm.app.Simulation object
A complete simulation object from which the state will be extracted.
segment_length : int
The number of integration steps run in a segment of simulation.
starting_walker : wepy.walker.Walker subclass object
The walker that was the beginning of this segment of simulation.
getState_kwargs : dict of str : bool
Specify the key-word arguments to pass to
simulation.context.getState when getting simulation
states.
Returns
-------
new_state : wepy.runners.openmm.OpenMMState object
A new state from the simulation state.
This method is meant to be called from within the
`run_segment` method during a simulation. It can be customized
in subclasses to allow for the addition of custom attributes
for a state, in addition to the base ones implemented in the
interface to the openmm simulation state in OpenMMState.
The extra arguments to this function are data that would allow
for the calculation of integral values over the duration of
the segment, such as time elapsed and differences from the
starting state.
"""
# save the state of the system with all possible values
new_sim_state = simulation.context.getState(**getState_kwargs)
# make an OpenMMState wrapper with this
new_state = OpenMMState(new_sim_state)
return new_state
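# Customization sketch: a subclass can attach extra fields to the state,
# since OpenMMState stores unknown kwargs as additional data (the
# `my_field` name here is hypothetical):
#
#   class MyRunner(OpenMMRunner):
#       def generate_state(self, simulation, segment_length,
#                          starting_walker, getState_kwargs):
#           sim_state = simulation.context.getState(**getState_kwargs)
#           return OpenMMState(sim_state, my_field=np.array([1.0]))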
class OpenMMState(WalkerState):
"""Walker state that wraps an simtk.openmm.State object.
The keys for which values in the state are available are given by
the KEYS module constant (accessible through the class constant of
the same name as well).
Additional fields can be added to these states through passing
extra kwargs to the constructor. These will be automatically given
a suffix of "_OTHER" to avoid name clashes.
"""
KEYS = KEYS
"""The provided attribute keys for the state."""
OTHER_KEY_TEMPLATE = "{}_OTHER"
"""String formatting template for attributes not set in KEYS."""
def __init__(self, sim_state, **kwargs):
"""Constructor for OpenMMState.
Parameters
----------
sim_state : simtk.openmm.State object
The simulation state retrieved from the simulation context.
kwargs : optional
Additional attributes to set for the state. Will add the
"_OTHER" suffix to the keys
"""
# save the simulation state
self._sim_state = sim_state
# save additional data if given
self._data = {}
for key, value in kwargs.items():
# if the key is already in the sim_state keys we need to
# modify it and raise a warning
if key in self.KEYS:
warn("Key {} in kwargs is already taken by this class, renaming to {}".format(
self.OTHER_KEY_TEMPLATE).format(key))
# make a new key
new_key = self.OTHER_KEY_TEMPLATE.format(key)
# set it in the data
self._data[new_key] = value
# otherwise just set it
else:
self._data[key] = value
@property
def sim_state(self):
"""The underlying simtk.openmm.State object this is wrapping."""
return self._sim_state
def __getitem__(self, key):
# if this was a key for data not mapped from the OpenMM.State
# object we use the _data attribute
if (key not in self.KEYS) and (
(not key.startswith('parameters')) and
(not key.startswith('parameter_derivatives'))
):
return self._data[key]
# otherwise we have to specifically get the correct data and
# process it into an array from the OpenMM.State
else:
if key == 'positions':
return self.positions_values()
elif key == 'velocities':
return self.velocities_values()
elif key == 'forces':
return self.forces_values()
elif key == 'kinetic_energy':
return self.kinetic_energy_value()
elif key == 'potential_energy':
return self.potential_energy_value()
elif key == 'time':
return self.time_value()
elif key == 'box_vectors':
return self.box_vectors_values()
elif key == 'box_volume':
return self.box_volume_value()
# handle the parameters differently since they are dictionaries of values
elif key.startswith('parameters'):
parameters_dict = self.parameters_values()
if parameters_dict is None:
return None
else:
# TODO: this was an attempt at a general way to do
# this but it doesn't work and I only ever need
# one nested level, so for now we just implement it that way
# return self._get_nested_attr_from_compound_key(key, parameters_dict)
param_key = key.split('/')[-1]
return parameters_dict[param_key]
elif key.startswith('parameter_derivatives'):
pd_dict = self.parameter_derivatives_values()
if pd_dict is None:
return None
else:
return self._get_nested_attr_from_compound_key(key, pd_dict)
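# Access sketch: plain keys return plain numpy values, while compound
# keys address the dictionary-valued attributes (the parameter name
# 'lambda' is hypothetical):
#
#   state['positions']                      # (n_atoms, 3) array
#   state['parameters/lambda']              # one parameter's value
#   state['parameter_derivatives/lambda']   # one derivative's value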
## Array properties
# Positions
@property
def positions(self):
"""The positions of the state as a numpy array simtk.units.Quantity object."""
try:
return self.sim_state.getPositions(asNumpy=True)
except:
warn("Unknown exception handled from `self.sim_state.getPositions()`, "
"this is probably because this attribute is not in the State.")
return None
@property
def positions_unit(self):
"""The units (as a simtk.units.Unit object) the positions are in."""
return self.positions.unit
def positions_values(self):
    """The positions of the state as a numpy array in the positions_unit
    simtk.units.Unit. This is what is returned by the __getitem__
    accessor.
    """
    positions = self.positions
    if positions is None:
        return None
    else:
        return positions.value_in_unit(self.positions_unit)
# Velocities
@property
def velocities(self):
"""The velocities of the state as a numpy array simtk.units.Quantity object."""
try:
return self.sim_state.getVelocities(asNumpy=True)
except:
warn("Unknown exception handled from `self.sim_state.getVelocities()`, "
"this is probably because this attribute is not in the State.")
return None
@property
def velocities_unit(self):
"""The units (as a simtk.units.Unit object) the velocities are in."""
return self.velocities.unit
def velocities_values(self):
"""The velocities of the state as a numpy array in the velocities_unit
simtk.units.Unit. This is what is returned by the __getitem__
accessor.
"""
velocities = self.velocities
if velocities is None:
return None
else:
return self.velocities.value_in_unit(self.velocities_unit)
# Forces
@property
def forces(self):
"""The forces of the state as a numpy array simtk.units.Quantity object."""
try:
return self.sim_state.getForces(asNumpy=True)
except:
warn("Unknown exception handled from `self.sim_state.getForces()`, "
"this is probably because this attribute is not in the State.")
return None
@property
def forces_unit(self):
"""The units (as a simtk.units.Unit object) the forces are in."""
return self.forces.unit
def forces_values(self):
"""The forces of the state as a numpy array in the forces_unit
simtk.units.Unit. This is what is returned by the __getitem__
accessor.
"""
forces = self.forces
if forces is None:
return None
else:
return self.forces.value_in_unit(self.forces_unit)
# Box Vectors
@property
def box_vectors(self):
"""The box vectors of the state as a numpy array simtk.units.Quantity object."""
try:
return self.sim_state.getPeriodicBoxVectors(asNumpy=True)
except:
warn("Unknown exception handled from `self.sim_state.getPeriodicBoxVectors()`, "
"this is probably because this attribute is not in the State.")
return None
@property
def box_vectors_unit(self):
"""The units (as a simtk.units.Unit object) the box vectors are in."""
return self.box_vectors.unit
def box_vectors_values(self):
"""The box vectors of the state as a numpy array in the
box_vectors_unit simtk.units.Unit. This is what is returned by
the __getitem__ accessor.
"""
box_vectors = self.box_vectors
if box_vectors is None:
return None
else:
return self.box_vectors.value_in_unit(self.box_vectors_unit)
## non-array properties
# Kinetic Energy
@property
def kinetic_energy(self):
"""The kinetic energy of the state as a numpy array simtk.units.Quantity object."""
try:
return self.sim_state.getKineticEnergy()
except:
warn("Unknown exception handled from `self.sim_state.getKineticEnergy()`, "
"this is probably because this attribute is not in the State.")
return None
@property
def kinetic_energy_unit(self):
"""The units (as a simtk.units.Unit object) the kinetic energy is in."""
return self.kinetic_energy.unit
def kinetic_energy_value(self):
"""The kinetic energy of the state as a numpy array in the kinetic_energy_unit
simtk.units.Unit. This is what is returned by the __getitem__
accessor.
"""
kinetic_energy = self.kinetic_energy
if kinetic_energy is None:
return None
else:
return np.array([self.kinetic_energy.value_in_unit(self.kinetic_energy_unit)])
# Potential Energy
@property
def potential_energy(self):
"""The potential energy of the state as a numpy array simtk.units.Quantity object."""
try:
return self.sim_state.getPotentialEnergy()
except:
warn("Unknown exception handled from `self.sim_state.getPotentialEnergy()`, "
"this is probably because this attribute is not in the State.")
return None
@property
def potential_energy_unit(self):
"""The units (as a simtk.units.Unit object) the potential energy is in."""
return self.potential_energy.unit
def potential_energy_value(self):
"""The potential energy of the state as a numpy array in the potential_energy_unit
simtk.units.Unit. This is what is returned by the __getitem__
accessor.
"""
potential_energy = self.potential_energy
if potential_energy is None:
return None
else:
return np.array([self.potential_energy.value_in_unit(self.potential_energy_unit)])
# Time
@property
def time(self):
"""The time of the state as a numpy array simtk.units.Quantity object."""
try:
return self.sim_state.getTime()
except:
warn("Unknown exception handled from `self.sim_state.getTime()`, "
"this is probably because this attribute is not in the State.")
return None
@property
def time_unit(self):
"""The units (as a simtk.units.Unit object) the time is in."""
return self.time.unit
def time_value(self):
"""The time of the state as a numpy array in the time_unit
simtk.units.Unit. This is what is returned by the __getitem__
accessor.
"""
time = self.time
if time is None:
return None
else:
return np.array([self.time.value_in_unit(self.time_unit)])
# Box Volume
@property
def box_volume(self):
"""The box volume of the state as a numpy array simtk.units.Quantity object."""
try:
return self.sim_state.getPeriodicBoxVolume()
except:
warn("Unknown exception handled from `self.sim_state.getPeriodicBoxVolume()`, "
"this is probably because this attribute is not in the State.")
return None
@property
def box_volume_unit(self):
"""The units (as a simtk.units.Unit object) the box volume is in."""
return self.box_volume.unit
def box_volume_value(self):
"""The box volume of the state as a numpy array in the box_volume_unit
simtk.units.Unit. This is what is returned by the __getitem__
accessor.
"""
box_volume = self.box_volume
if box_volume is None:
return None
else:
return np.array([self.box_volume.value_in_unit(self.box_volume_unit)])
## Dictionary properties
## Unitless
# Parameters
@property
def parameters(self):
"""The parameters of the state as a dictionary mapping the names of
the parameters to their values which are numpy array
simtk.units.Quantity objects.
"""
try:
return self.sim_state.getParameters()
except:
warn("Unknown exception handled from `self.sim_state.getParameters()`, "
"this is probably because this attribute is not in the State.")
return None
@property
def parameters_unit(self):
"""The units for each parameter as a dictionary mapping parameter
names to their corresponding unit as a simtk.units.Unit
object.
"""
param_units = {key : None for key, val in self.parameters.items()}
return param_units
def parameters_values(self):
"""The parameters of the state as a dictionary mapping the name of the
parameter to a numpy array in the unit for the parameter of the
same name in the parameters_unit corresponding
simtk.units.Unit object. This is what is returned by the
__getitem__ accessor using the compound key syntax with the
prefix 'parameters', e.g. state['parameters/paramA'] for the
parameter 'paramA'.
"""
if self.parameters is None:
return None
param_arrs = {key : np.array(val) for key, val
in self.parameters.items()}
# return None if there is nothing in this
if len(param_arrs) == 0:
return None
else:
return param_arrs
# Parameter Derivatives
@property
def parameter_derivatives(self):
"""The parameter derivatives of the state as a dictionary mapping the
names of the parameters to their values which are numpy array
simtk.units.Quantity objects.
"""
try:
return self.sim_state.getEnergyParameterDerivatives()
except:
warn("Unknown exception handled from `self.sim_state.getEnergyParameterDerivatives()`, "
"this is probably because this attribute is not in the State.")
return None
@property
def parameter_derivatives_unit(self):
"""The units for each parameter derivative as a dictionary mapping
parameter names to their corresponding unit as a
simtk.units.Unit object.
"""
param_units = {key : None for key, val in self.parameter_derivatives.items()}
return param_units
def parameter_derivatives_values(self):
"""The parameter derivatives of the state as a dictionary mapping the
name of the parameter to a numpy array in the unit for the
parameter of the same name in the parameters_unit
corresponding simtk.units.Unit object. This is what is
returned by the __getitem__ accessor using the compound key
syntax with the prefix 'parameter_derivatives',
e.g. state['parameter_derivatives/paramA'] for the parameter
'paramA'.
"""
if self.parameter_derivatives is None:
return None
param_arrs = {key : np.array(val) for key, val
in self.parameter_derivatives.items()}
# return None if there is nothing in this
if len(param_arrs) == 0:
return None
else:
return param_arrs
# for the dict attributes we need to transform the keys for making
# a proper state where all __getitem__ things are arrays
def _dict_attr_to_compound_key_dict(self, root_key, attr_dict):
"""Transform a dictionary of values within the compound key 'root_key'
to a dictionary mapping compound keys to values.
For example, given the root_key 'parameters' and the parameters
dictionary {'paramA' : 1.234}, this returns {'parameters/paramA' : 1.234}.
Parameters
----------
root_key : str
The compound key prefix
attr_dict : dict of str : value
The dictionary with simple keys within the root key namespace.
Returns
-------
compound_key_dict : dict of str : value
The dictionary with the compound keys.
"""
key_template = "{}/{}"
cmpd_key_d = {}
for key, value in attr_dict.items():
new_key = key_template.format(root_key, key)
# if this is a proper feature
if type(value) == np.ndarray:
cmpd_key_d[new_key] = value
elif hasattr(value, '__getitem__'):
cmpd_key_d.update(self._dict_attr_to_compound_key_dict(new_key, value))
else:
raise TypeError("Unsupported attribute type")
return cmpd_key_d
def _get_nested_attr_from_compound_key(self, compound_key, compound_feat_dict):
"""Get arbitrarily deeply nested compound keys from the full
dictionary tree.
Parameters
----------
compound_key : str
Compound key separated by '/' characters
compound_feat_dict : dict
Dictionary of arbitrary depth
Returns
-------
value
Value requested by the key.
"""
key_components = compound_key.split('/')
# the container must actually be dict-like, otherwise we cannot
# index into it with the key components
if not hasattr(compound_feat_dict, '__getitem__'):
raise TypeError("Must provide a dict-like with the compound key")
value = compound_feat_dict[key_components[0]]
# if the value itself is compound recursively fetch the value
if hasattr(value, '__getitem__') and len(key_components[1:]) > 0:
subgroup_key = '/'.join(key_components[1:])
return self._get_nested_attr_from_compound_key(subgroup_key, value)
elif hasattr(value, '__getitem__') and len(key_components[1:]) < 1:
raise ValueError("Key does not reference a leaf node of attribute")
# otherwise we have the right key so return the object
else:
return value
def parameters_features(self):
"""Returns a dictionary of the parameters with their appropriate
compound keys. This can be used for placing them in the same namespace
as the rest of the attributes."""
parameters = self.parameters_values()
if parameters is None:
return None
else:
return self._dict_attr_to_compound_key_dict('parameters', parameters)
def parameter_derivatives_features(self):
"""Returns a dictionary of the parameter derivatives with their appropriate
compound keys. This can be used for placing them in the same namespace
as the rest of the attributes."""
parameter_derivatives = self.parameter_derivatives_values()
if parameter_derivatives is None:
return None
else:
return self._dict_attr_to_compound_key_dict('parameter_derivatives',
parameter_derivatives)
def omm_state_dict(self):
"""Return a dictionary with all of the default keys from the wrapped
simtk.openmm.State object"""
feature_d = {'positions' : self.positions_values(),
'velocities' : self.velocities_values(),
'forces' : self.forces_values(),
'kinetic_energy' : self.kinetic_energy_value(),
'potential_energy' : self.potential_energy_value(),
'time' : self.time_value(),
'box_vectors' : self.box_vectors_values(),
'box_volume' : self.box_volume_value(),
}
params = self.parameters_features()
param_derivs = self.parameter_derivatives_features()
if params is not None:
feature_d.update(params)
if param_derivs is not None:
feature_d.update(param_derivs)
return feature_d
def dict(self):
# documented in superclass
d = {}
for key, value in self._data.items():
d[key] = value
for key, value in self.omm_state_dict().items():
d[key] = value
return d
def to_mdtraj(self, topology):
"""Returns an mdtraj.Trajectory object from this walker's state.
Parameters
----------
topology : mdtraj.Topology object
Topology for the state.
Returns
-------
state_traj : mdtraj.Trajectory object
"""
import mdtraj as mdj
# convert the box vectors to the unit cell lengths and angles mdtraj expects
unitcell_lengths, unitcell_angles = box_vectors_to_lengths_angles(self.box_vectors)
return mdj.Trajectory(np.array([self.positions_values()]),
unitcell_lengths=[unitcell_lengths],
unitcell_angles=[unitcell_angles],
topology=topology)
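# Usage sketch (assumes `mdj_top` is an mdtraj.Topology for the same
# system; the name is hypothetical):
#
#   traj = state.to_mdtraj(mdj_top)
#   traj.save_pdb('state.pdb')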
def gen_sim_state(positions, system, integrator,
getState_kwargs=None):
"""Convenience function for generating an omm.State object.
Parameters
----------
positions : arraylike of float
The positions for the system you want to set
system : openmm.app.System object
integrator : openmm.Integrator object
getState_kwargs : dict of str : bool, optional
    Key-word arguments to pass to context.getState; if None the
    module defaults in GET_STATE_KWARG_DEFAULTS are used.
Returns
-------
sim_state : openmm.State object
"""
# handle the getState_kwargs
tmp_getState_kwargs = getState_kwargs
# start with the defaults
getState_kwargs = dict(GET_STATE_KWARG_DEFAULTS)
# if there were customizations use them
if tmp_getState_kwargs is not None:
getState_kwargs.update(tmp_getState_kwargs)
# generate a throwaway context, using the reference platform so we
# don't screw up other platform stuff later in the same process
platform = omm.Platform.getPlatformByName('Reference')
context = omm.Context(system, copy(integrator), platform)
# set the positions
context.setPositions(positions)
# then just retrieve it as a state using the default kwargs
sim_state = context.getState(**getState_kwargs)
return sim_state
def gen_walker_state(positions, system, integrator,
getState_kwargs=None):
"""Convenience function for generating a wepy walker State object for
an openmm simulation state.
Parameters
----------
positions : arraylike of float
The positions for the system you want to set
system : openmm.app.System object
integrator : openmm.Integrator object
getState_kwargs : dict of str : bool, optional
    Key-word arguments to pass to context.getState; if None the
    module defaults in GET_STATE_KWARG_DEFAULTS are used.
Returns
-------
walker_state : wepy.runners.openmm.OpenMMState object
"""
state = OpenMMState(gen_sim_state(positions, system, integrator,
getState_kwargs=getState_kwargs))
return state
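# Sketch for seeding a simulation with identical walkers (the names are
# hypothetical):
#
#   init_state = gen_walker_state(positions, system, integrator)
#   walkers = [OpenMMWalker(init_state, 1.0 / n_walkers)
#              for _ in range(n_walkers)]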
class OpenMMWalker(Walker):
"""Walker for OpenMMRunner simulations.
This simply enforces the use of an OpenMMState object for the
walker state attribute.
"""
def __init__(self, state, weight):
# documented in superclass
assert isinstance(state, OpenMMState), \
"state must be an instance of class OpenMMState not {}".format(type(state))
super().__init__(state, weight)
class OpenMMCPUWorker(Worker):
"""Worker for OpenMM GPU simulations (CUDA or OpenCL platforms).
This is intended to be used with the wepy.work_mapper.WorkerMapper
work mapper class.
This class must be used in order to ensure OpenMM runs jobs on the
appropriate GPU device.
"""
NAME_TEMPLATE = "OpenMMCPUWorker-{}"
"""The name template the worker processes are named to substituting in
the process number."""
DEFAULT_NUM_THREADS = 1
def __init__(self, *args, **kwargs):
if 'num_threads' not in kwargs:
num_threads = self.DEFAULT_NUM_THREADS
else:
num_threads = kwargs.pop('num_threads')
super().__init__(*args,
num_threads=num_threads,
**kwargs)
def run_task(self, task):
# documented in superclass
# make the platform kwargs dictionary
platform_options = {'Threads' : str(self.attributes['num_threads'])}
# run the task and pass in the Threads option for OpenMM to
# use the configured number of CPU threads
return task(platform_kwargs=platform_options)
class OpenMMGPUWorker(Worker):
"""Worker for OpenMM GPU simulations (CUDA or OpenCL platforms).
This is intended to be used with the wepy.work_mapper.WorkerMapper
work mapper class.
This class must be used in order to ensure OpenMM runs jobs on the
appropriate GPU device.
"""
NAME_TEMPLATE = "OpenMMGPUWorker-{}"
"""The name template the worker processes are named to substituting in
the process number."""
def run_task(self, task):
# get the platform
platform = self.mapper_attributes['platform']
# get the device index from the attributes
device_id = self.mapper_attributes['device_ids'][self._worker_idx]
# make the platform kwargs dictionary
platform_options = {'DeviceIndex' : str(device_id)}
logging.info(f"platform={platform}, platform_options={platform_options}")
return task(
platform=platform,
platform_kwargs=platform_options,
)
class OpenMMCPUWalkerTaskProcess(WalkerTaskProcess):
NAME_TEMPLATE = "OpenMM_CPU_Walker_Task-{}"
def run_task(self, task):
if 'num_threads' in self.mapper_attributes:
num_threads = self.mapper_attributes['num_threads']
# make the platform kwargs dictionary
platform_options = {'Threads' : str(num_threads)}
logging.info(f"Threads={num_threads}")
else:
platform_options = {}
return task(
platform_kwargs=platform_options,
)
class OpenMMGPUWalkerTaskProcess(WalkerTaskProcess):
NAME_TEMPLATE = "OpenMM_GPU_Walker_Task-{}"
def run_task(self, task):
logging.info(f"Starting to run a task as worker {self._worker_idx}")
# get the platform
platform = self.mapper_attributes['platform']
# get the device index from the attributes
device_id = self.mapper_attributes['device_ids'][self._worker_idx]
# make the platform kwargs dictionary
platform_options = {'DeviceIndex' : str(device_id)}
logging.info(f"platform={platform}, platform_options={platform_options}")
return task(
platform=platform,
platform_kwargs=platform_options,
)
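# Configuration sketch: the GPU worker and GPU walker task process both
# expect the work mapper to supply 'platform' and 'device_ids' through
# mapper_attributes, e.g. (hypothetical values):
#
#   mapper_attributes = {'platform': 'CUDA', 'device_ids': [0, 1]}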
|
ADicksonLab/wepy
|
src/wepy/runners/openmm.py
|
Python
|
mit
| 46,448
|
[
"MDTraj",
"OpenMM"
] |
4fc7581ac6ca38b992b5e8a5e74f68e07defc042fb90130d79c2efa082982e56
|
#!/usr/bin/env python
from vtk import *
graph = vtkMutableDirectedGraph()
a = graph.AddVertex()
b = graph.AddChild(a)
c = graph.AddChild(a)
d = graph.AddChild(b)
e = graph.AddChild(c)
f = graph.AddChild(c)
tree = vtkTree()
tree.CheckedShallowCopy(graph)
view = vtkGraphLayoutView()
view.AddRepresentationFromInput(tree)
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
view.GetInteractor().Start()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Examples/Infovis/Python/create_tree.py
|
Python
|
bsd-3-clause
| 452
|
[
"VTK"
] |
5bccc23e21a308efaa15a4d2e998d1e0ca79229e2f8c1d26d4c24102bacf4399
|
from sympy.core.add import Add
from sympy.core.basic import Basic, C
from sympy.core.expr import Expr
from sympy.core.function import count_ops
from sympy.core.power import Pow
from sympy.core.symbol import Symbol, Dummy
from sympy.core.numbers import Integer, ilcm, Rational, Float
from sympy.core.singleton import S
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, default_sort_key
from sympy.polys import PurePoly, roots, cancel
from sympy.simplify import simplify as _simplify, signsimp, nsimplify
from sympy.utilities.iterables import flatten
from sympy.functions.elementary.miscellaneous import sqrt, Max, Min
from sympy.printing import sstr
from sympy.core.compatibility import callable, reduce, as_int
from sympy.utilities.exceptions import SymPyDeprecationWarning
from types import FunctionType
def _iszero(x):
"""Returns True if x is zero."""
return x.is_zero
class MatrixError(Exception):
pass
class ShapeError(ValueError, MatrixError):
"""Wrong matrix shape"""
pass
class NonSquareMatrixError(ShapeError):
pass
class DeferredVector(Symbol):
"""A vector whose components are deferred (e.g. for use with lambdify)
Examples
========
>>> from sympy import DeferredVector, lambdify
>>> X = DeferredVector( 'X' )
>>> X
X
>>> expr = (X[0] + 2, X[2] + 3)
>>> func = lambdify( X, expr )
>>> func( [1, 2, 3] )
(3, 6)
"""
def __getitem__(self, i):
if i == -0:
i = 0
if i < 0:
raise IndexError('DeferredVector index out of range')
component_name = '%s[%d]' % (self.name, i)
return Symbol(component_name)
def __str__(self):
return sstr(self)
def __repr__(self):
return "DeferredVector('%s')" % (self.name)
class MatrixBase(object):
# Added just for numpy compatibility
__array_priority__ = 11
is_Matrix = True
is_Identity = None
_class_priority = 3
_sympify = staticmethod(sympify)
@classmethod
def _handle_creation_inputs(cls, *args, **kwargs):
"""Return the number of rows, cols and flat matrix elements.
Examples
========
>>> from sympy import Matrix, I
Matrix can be constructed as follows:
* from a nested list of iterables
>>> Matrix( ((1, 2+I), (3, 4)) )
Matrix([
[1, 2 + I],
[3, 4]])
* from un-nested iterable (interpreted as a column)
>>> Matrix( [1, 2] )
Matrix([
[1],
[2]])
* from un-nested iterable with dimensions
>>> Matrix(1, 2, [1, 2] )
Matrix([[1, 2]])
* from no arguments (a 0 x 0 matrix)
>>> Matrix()
Matrix(0, 0, [])
* from a rule
>>> Matrix(2, 2, lambda i, j: i/(j + 1) )
Matrix([
[0, 0],
[1, 1/2]])
"""
from sympy.matrices.sparse import SparseMatrix
# Matrix(SparseMatrix(...))
if len(args) == 1 and isinstance(args[0], SparseMatrix):
return args[0].rows, args[0].cols, flatten(args[0].tolist())
# Matrix(Matrix(...))
if len(args) == 1 and isinstance(args[0], MatrixBase):
return args[0].rows, args[0].cols, args[0]._mat
# Matrix(MatrixSymbol('X', 2, 2))
if len(args) == 1 and isinstance(args[0], Basic) and args[0].is_Matrix:
return args[0].rows, args[0].cols, args[0].as_explicit()._mat
if len(args) == 3:
rows = as_int(args[0])
cols = as_int(args[1])
# Matrix(2, 2, lambda i, j: i+j)
if len(args) == 3 and callable(args[2]):
operation = args[2]
flat_list = []
for i in range(rows):
flat_list.extend([cls._sympify(operation(cls._sympify(i), j))
for j in range(cols)])
# Matrix(2, 2, [1, 2, 3, 4])
elif len(args) == 3 and is_sequence(args[2]):
flat_list = args[2]
if len(flat_list) != rows*cols:
raise ValueError('List length should be equal to rows*columns')
flat_list = map(lambda i: cls._sympify(i), flat_list)
# Matrix(numpy.ones((2, 2)))
elif len(args) == 1 and hasattr(args[0], "__array__"): # pragma: no cover
# NumPy array or matrix or some other object that implements
# __array__. So let's first use this method to get a
# numpy.array() and then make a python list out of it.
arr = args[0].__array__()
if len(arr.shape) == 2:
rows, cols = arr.shape[0], arr.shape[1]
flat_list = map(lambda i: cls._sympify(i), arr.ravel())
return rows, cols, flat_list
elif len(arr.shape) == 1:
rows, cols = 1, arr.shape[0]
flat_list = [S.Zero]*cols
for i in range(len(arr)):
flat_list[i] = cls._sympify(arr[i])
return rows, cols, flat_list
else:
raise NotImplementedError(
"SymPy supports just 1D and 2D matrices")
# Matrix([1, 2, 3]) or Matrix([[1, 2], [3, 4]])
elif len(args) == 1 and is_sequence(args[0]):
in_mat = []
ncol = set()
for row in args[0]:
if isinstance(row, MatrixBase):
in_mat.extend(row.tolist())
if row.cols or row.rows: # only pay attention if it's not 0x0
ncol.add(row.cols)
else:
in_mat.append(row)
try:
ncol.add(len(row))
except TypeError:
ncol.add(1)
if len(ncol) > 1:
raise ValueError("Got rows of variable lengths: %s" %
sorted(list(ncol)))
rows = len(in_mat)
if rows:
if not is_sequence(in_mat[0]):
cols = 1
flat_list = map(lambda i: cls._sympify(i), in_mat)
return rows, cols, flat_list
cols = ncol.pop()
else:
cols = 0
flat_list = []
for j in range(rows):
for i in range(cols):
flat_list.append(cls._sympify(in_mat[j][i]))
# Matrix()
elif len(args) == 0:
# Empty Matrix
rows = cols = 0
flat_list = []
else:
raise TypeError("Data type not understood")
return rows, cols, flat_list
def _setitem(self, key, value):
"""Helper to set value at location given by key.
Examples
========
>>> from sympy import Matrix, I, zeros, ones
>>> m = Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m[1, 0] = 9
>>> m
Matrix([
[1, 2 + I],
[9, 4]])
>>> m[1, 0] = [[0, 1]]
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = zeros(4)
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
from dense import Matrix
is_slice = isinstance(key, slice)
i, j = key = self.key2ij(key)
is_mat = isinstance(value, MatrixBase)
if type(i) is slice or type(j) is slice:
if is_mat:
self.copyin_matrix(key, value)
return
if not isinstance(value, Expr) and is_sequence(value):
self.copyin_list(key, value)
return
raise ValueError('unexpected value: %s' % value)
else:
if (not is_mat and
not isinstance(value, Basic) and is_sequence(value)):
value = Matrix(value)
is_mat = True
if is_mat:
if is_slice:
key = (slice(*divmod(i, self.cols)),
slice(*divmod(j, self.cols)))
else:
key = (slice(i, i + value.rows),
slice(j, j + value.cols))
self.copyin_matrix(key, value)
else:
return i, j, self._sympify(value)
return
def copy(self):
return self._new(self.rows, self.cols, self._mat)
def trace(self):
if not self.is_square:
raise NonSquareMatrixError()
return self._eval_trace()
def inv(self, method=None, **kwargs):
if not self.is_square:
raise NonSquareMatrixError()
if method is not None:
kwargs['method'] = method
return self._eval_inverse(**kwargs)
def transpose(self):
return self._eval_transpose()
T = property(transpose, None, None, "Matrix transposition.")
def conjugate(self):
return self._eval_conjugate()
C = property(conjugate, None, None, "By-element conjugation.")
def adjoint(self):
"""Conjugate transpose or Hermitian conjugation."""
return self.T.C
@property
def H(self):
"""Return Hermite conjugate.
Examples
========
>>> from sympy import Matrix, I
>>> m = Matrix((0, 1 + I, 2, 3))
>>> m
Matrix([
[ 0],
[1 + I],
[ 2],
[ 3]])
>>> m.H
Matrix([[0, 1 - I, 2, 3]])
See Also
========
conjugate: By-element conjugation
D: Dirac conjugation
"""
return self.T.C
@property
def D(self):
"""Return Dirac conjugate (if self.rows == 4).
Examples
========
>>> from sympy import Matrix, I, eye
>>> m = Matrix((0, 1 + I, 2, 3))
>>> m.D
Matrix([[0, 1 - I, -2, -3]])
>>> m = (eye(4) + I*eye(4))
>>> m[0, 3] = 2
>>> m.D
Matrix([
[1 - I, 0, 0, 0],
[ 0, 1 - I, 0, 0],
[ 0, 0, -1 + I, 0],
[ 2, 0, 0, -1 + I]])
If the matrix does not have 4 rows an AttributeError will be raised
because this property is only defined for matrices with 4 rows.
>>> Matrix(eye(2)).D
Traceback (most recent call last):
...
AttributeError: Matrix has no attribute D.
See Also
========
conjugate: By-element conjugation
H: Hermite conjugation
"""
from sympy.physics.matrices import mgamma
if self.rows != 4:
# In Python 3.2, properties can only raise an AttributeError
# so we can't raise a ShapeError -- see commit which added the
# first line of this inline comment. Also, there is no need
# for a message since MatrixBase will raise the AttributeError
raise AttributeError
return self.H*mgamma(0)
def __array__(self):
from dense import matrix2numpy
return matrix2numpy(self)
def __len__(self):
"""Return the number of elements of self.
Implemented mainly so bool(Matrix()) == False.
"""
return self.rows*self.cols
@property
def shape(self):
"""The shape (dimensions) of the matrix as the 2-tuple (rows, cols).
Examples
========
>>> from sympy.matrices import zeros
>>> M = zeros(2, 3)
>>> M.shape
(2, 3)
>>> M.rows
2
>>> M.cols
3
"""
return (self.rows, self.cols)
def __sub__(self, a):
return self + (-a)
def __rsub__(self, a):
return (-self) + a
def __mul__(self, other):
"""Return self*other where other is either a scalar or a matrix
of compatible dimensions.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> 2*A == A*2 == Matrix([[2, 4, 6], [8, 10, 12]])
True
>>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> A*B
Matrix([
[30, 36, 42],
[66, 81, 96]])
>>> B*A
Traceback (most recent call last):
...
ShapeError: Matrices size mismatch.
>>>
See Also
========
matrix_multiply_elementwise
"""
if getattr(other, 'is_Matrix', False):
# The following implementation is equivalent, but about 5% slower
#ma, na = A.shape
#mb, nb = B.shape
#
#if na != mb:
# raise ShapeError()
#product = Matrix(ma, nb, lambda i, j: 0)
#for i in range(ma):
# for j in range(nb):
# s = 0
# for k in range(na):
# s += A[i, k]*B[k, j]
# product[i, j] = s
#return product
A = self
B = other
if A.cols != B.rows:
raise ShapeError("Matrices size mismatch.")
if A.cols == 0:
return classof(A, B)._new(A.rows, B.cols, lambda i, j: 0)
blst = B.T.tolist()
alst = A.tolist()
return classof(A, B)._new(A.rows, B.cols, lambda i, j:
reduce(lambda k, l: k + l,
[a_ik * b_kj for a_ik, b_kj in zip(alst[i], blst[j])]))
else:
return self._new(self.rows, self.cols,
map(lambda i: i*other, self._mat))
def __rmul__(self, a):
if getattr(a, 'is_Matrix', False):
return self._new(a)*self
return self*a
def __pow__(self, num):
from sympy.matrices import eye
if not self.is_square:
raise NonSquareMatrixError()
if isinstance(num, int) or isinstance(num, Integer):
n = int(num)
if n < 0:
return self.inv()**-n # A**-2 = (A**-1)**2
a = eye(self.cols)
s = self
while n:
if n % 2:
a *= s
n -= 1
if not n:
break
s *= s
n //= 2
return self._new(a)
elif isinstance(num, Rational):
try:
P, D = self.diagonalize()
except MatrixError:
raise NotImplementedError(
"Implemented only for diagonalizable matrices")
for i in range(D.rows):
D[i, i] = D[i, i]**num
return self._new(P*D*P.inv())
else:
raise NotImplementedError(
"Only integer and rational values are supported")
def __add__(self, other):
"""Return self + other, raising ShapeError if shapes don't match."""
if getattr(other, 'is_Matrix', False):
A = self
B = other
if A.shape != B.shape:
raise ShapeError("Matrices size mismatch.")
alst = A.tolist()
blst = B.tolist()
ret = [S.Zero]*A.rows
for i in range(A.shape[0]):
ret[i] = map(lambda j, k: j + k, alst[i], blst[i])
return classof(A, B)._new(ret)
raise TypeError('cannot add matrix and %s' % type(other))
def __radd__(self, other):
return self + other
def __div__(self, other):
return self*(S.One / other)
def __truediv__(self, other):
return self.__div__(other)
def __neg__(self):
return -1*self
def multiply(self, b):
"""Returns self*b
See Also
========
dot
cross
multiply_elementwise
"""
return self*b
def add(self, b):
"""Return self + b """
return self + b
def table(self, printer, rowsep='\n', colsep=', ', align='right'):
r"""
String form of Matrix as a table.
``printer`` is the printer to use on the elements (generally
something like StrPrinter())
``rowsep`` is the string used to separate rows (by default a newline).
``colsep`` is the string used to separate columns (by default ', ').
``align`` defines how the elements are aligned. Must be one of 'left',
'right', or 'center'. You can also use '<', '>', and '^' to mean the
same thing, respectively.
This is used by the string printer for Matrix.
Examples
========
>>> from sympy import Matrix
>>> from sympy.printing.str import StrPrinter
>>> M = Matrix([[1, 2], [-33, 4]])
>>> printer = StrPrinter()
>>> M.table(printer)
'[ 1, 2]\n[-33, 4]'
>>> print M.table(printer)
[ 1, 2]
[-33, 4]
>>> print M.table(printer, rowsep=',\n')
[ 1, 2],
[-33, 4]
>>> print '[%s]' % M.table(printer, rowsep=',\n')
[[ 1, 2],
[-33, 4]]
>>> print M.table(printer, colsep=' ')
[ 1 2]
[-33 4]
>>> print M.table(printer, align='center')
[ 1 , 2]
[-33, 4]
"""
# Handle zero dimensions:
if self.rows == 0 or self.cols == 0:
return '[]'
# Build table of string representations of the elements
res = []
# Track per-column max lengths for pretty alignment
maxlen = [0] * self.cols
for i in range(self.rows):
res.append([])
for j in range(self.cols):
s = printer._print(self[i,j])
res[-1].append(s)
maxlen[j] = max(len(s), maxlen[j])
# Patch strings together
align = {
'left': str.ljust,
'right': str.rjust,
'center': str.center,
'<': str.ljust,
'>': str.rjust,
'^': str.center,
}[align]
for i, row in enumerate(res):
for j, elem in enumerate(row):
row[j] = align(elem, maxlen[j])
res[i] = "[" + colsep.join(row) + "]"
return rowsep.join(res)
def _format_str(self, printer=None):
if not printer:
from sympy.printing.str import StrPrinter
printer = StrPrinter()
# Handle zero dimensions:
if self.rows == 0 or self.cols == 0:
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
if self.rows == 1:
return "Matrix([%s])" % self.table(printer, rowsep=',\n')
return "Matrix([\n%s])" % self.table(printer, rowsep=',\n')
def __str__(self):
if self.rows == 0 or self.cols == 0:
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
return "Matrix(%s)" % str(self.tolist())
def __repr__(self):
return sstr(self)
def cholesky(self):
"""Returns the Cholesky decomposition L of a matrix A
such that L * L.T = A
A must be a square, symmetric, positive-definite
and non-singular matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T
Matrix([
[25, 15, -5],
[15, 18, 0],
[-5, 0, 11]])
See Also
========
LDLdecomposition
LUdecomposition
QRdecomposition
"""
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if not self.is_symmetric():
raise ValueError("Matrix must be symmetric.")
return self._cholesky()
def LDLdecomposition(self):
"""Returns the LDL Decomposition (L, D) of matrix A,
such that L * D * L.T == A
This method eliminates the use of square roots.
Further, it ensures that all the diagonal entries of L are 1.
A must be a square, symmetric, positive-definite
and non-singular matrix.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T * A.inv() == eye(A.rows)
True
See Also
========
cholesky
LUdecomposition
QRdecomposition
"""
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if not self.is_symmetric():
raise ValueError("Matrix must be symmetric.")
return self._LDLdecomposition()
def lower_triangular_solve(self, rhs):
"""Solves Ax = B, where A is a lower triangular matrix.
See Also
========
upper_triangular_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
"""
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != self.rows:
raise ShapeError("Matrices size mismatch.")
if not self.is_lower:
raise ValueError("Matrix must be lower triangular.")
return self._lower_triangular_solve(rhs)
def upper_triangular_solve(self, rhs):
"""Solves Ax = B, where A is an upper triangular matrix.
See Also
========
lower_triangular_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
"""
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != self.rows:
raise TypeError("Matrix size mismatch.")
if not self.is_upper:
raise TypeError("Matrix is not upper triangular.")
return self._upper_triangular_solve(rhs)
def cholesky_solve(self, rhs):
"""Solves Ax = B using Cholesky decomposition,
for a general square non-singular matrix.
For a non-square matrix with rows > cols,
the least squares solution is returned.
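Examples
========
For example, on a simple symmetric system (mirroring the LDLsolve
example below):
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.cholesky_solve(B) == B/2
True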
See Also
========
lower_triangular_solve
upper_triangular_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
"""
if self.is_symmetric():
L = self._cholesky()
elif self.rows >= self.cols:
L = (self.T*self)._cholesky()
rhs = self.T*rhs
else:
raise NotImplementedError("Under-determined System.")
Y = L._lower_triangular_solve(rhs)
return (L.T)._upper_triangular_solve(Y)
def diagonal_solve(self, rhs):
"""Solves Ax = B efficiently, where A is a diagonal Matrix,
with non-zero diagonal entries.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.diagonal_solve(B) == B/2
True
See Also
========
lower_triangular_solve
upper_triangular_solve
cholesky_solve
LDLsolve
LUsolve
QRsolve
"""
if not self.is_diagonal:
raise TypeError("Matrix should be diagonal")
if rhs.rows != self.rows:
raise TypeError("Size mis-match")
return self._diagonal_solve(rhs)
def LDLsolve(self, rhs):
"""Solves Ax = B using LDL decomposition,
for a general square and non-singular matrix.
For a non-square matrix with rows > cols,
the least squares solution is returned.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.LDLsolve(B) == B/2
True
See Also
========
LDLdecomposition
lower_triangular_solve
upper_triangular_solve
cholesky_solve
diagonal_solve
LUsolve
QRsolve
"""
if self.is_symmetric():
L, D = self.LDLdecomposition()
elif self.rows >= self.cols:
L, D = (self.T*self).LDLdecomposition()
rhs = self.T*rhs
else:
raise NotImplementedError("Under-determined System.")
Y = L._lower_triangular_solve(rhs)
Z = D._diagonal_solve(Y)
return (L.T)._upper_triangular_solve(Z)
def solve_least_squares(self, rhs, method='CH'):
"""Return the least-square fit to the data.
By default the cholesky_solve routine is used (method='CH'); other
methods of matrix inversion can be used. To find out which are
available, see the docstring of the .inv() method.
Examples
========
>>> from sympy.matrices import Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = Matrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each line of S represents coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
"""
if method == 'CH':
return self.cholesky_solve(rhs)
t = self.T
return (t*self).inv(method=method)*t*rhs
def solve(self, rhs, method='GE'):
"""Return solution to self*soln = rhs using given inversion method.
For a list of possible inversion methods, see the .inv() docstring.
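Examples
========
For example, with a simple invertible matrix:
>>> from sympy.matrices import Matrix
>>> A = Matrix([[2, 0], [0, 4]])
>>> A.solve(Matrix([2, 4])) == Matrix([1, 1])
True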
"""
if not self.is_square:
if self.rows < self.cols:
raise ValueError('Under-determined system.')
elif self.rows > self.cols:
raise ValueError('For over-determined system, M, having '
'more rows than columns, try M.solve_least_squares(rhs).')
else:
return self.inv(method=method)*rhs
def __mathml__(self):
mml = ""
for i in range(self.rows):
mml += "<matrixrow>"
for j in range(self.cols):
mml += self[i, j].__mathml__()
mml += "</matrixrow>"
return "<matrix>" + mml + "</matrix>"
def submatrix(self, keys):
"""
Get a slice/submatrix of the matrix using the given slice.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(4, 4, lambda i, j: i+j)
>>> m
Matrix([
[0, 1, 2, 3],
[1, 2, 3, 4],
[2, 3, 4, 5],
[3, 4, 5, 6]])
>>> m[:1, 1]
Matrix([[1]])
>>> m[:2, :1]
Matrix([
[0],
[1]])
>>> m[2:4, 2:4]
Matrix([
[4, 5],
[5, 6]])
See Also
========
extract
"""
rlo, rhi, clo, chi = self.key2bounds(keys)
rows, cols = rhi - rlo, chi - clo
mat = [S.Zero]*rows*cols
for i in range(rows):
mat[i*cols:(i + 1)*cols] = \
self._mat[(i + rlo)*self.cols + clo:(i + rlo)*self.cols + chi]
return self._new(rows, cols, mat)
def extract(self, rowsList, colsList):
"""Return a submatrix by specifying a list of rows and columns.
Negative indices can be given. All indices must be in the range
-n <= i < n where n is the number of rows or columns.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(4, 3, range(12))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11]])
>>> m.extract([0, 1, 3], [0, 1])
Matrix([
[0, 1],
[3, 4],
[9, 10]])
Rows or columns can be repeated:
>>> m.extract([0, 0, 1], [-1])
Matrix([
[2],
[2],
[5]])
Every other row can be taken by using range to provide the indices:
>>> m.extract(range(0, m.rows, 2), [-1])
Matrix([
[2],
[8]])
See Also
========
submatrix
"""
cols = self.cols
flat_list = self._mat
rowsList = [a2idx(k, self.rows) for k in rowsList]
colsList = [a2idx(k, self.cols) for k in colsList]
return self._new(len(rowsList), len(colsList),
lambda i, j: flat_list[rowsList[i]*cols + colsList[j]])
def key2bounds(self, keys):
"""Converts a key with potentially mixed types of keys (integer and slice)
into a tuple of ranges and raises an error if any index is out of self's
range.
See Also
========
key2ij
"""
islice, jslice = [isinstance(k, slice) for k in keys]
if islice:
if not self.rows:
rlo = rhi = 0
else:
rlo, rhi = keys[0].indices(self.rows)[:2]
else:
rlo = a2idx(keys[0], self.rows)
rhi = rlo + 1
if jslice:
if not self.cols:
clo = chi = 0
else:
clo, chi = keys[1].indices(self.cols)[:2]
else:
clo = a2idx(keys[1], self.cols)
chi = clo + 1
return rlo, rhi, clo, chi
def key2ij(self, key):
"""Converts key into canonical form, converting integers or indexable
items into valid integers for self's range or returning slices
unchanged.
See Also
========
key2bounds
"""
if is_sequence(key):
if not len(key) == 2:
raise TypeError('key must be a sequence of length 2')
return [a2idx(i, n) if not isinstance(i, slice) else i
for i, n in zip(key, self.shape)]
elif isinstance(key, slice):
return key.indices(len(self))[:2]
else:
return divmod(a2idx(key, len(self)), self.cols)
def evalf(self, prec=None, **options):
"""Apply evalf() to each element of self."""
if prec is None:
return self.applyfunc(lambda i: i.evalf(**options))
else:
return self.applyfunc(lambda i: i.evalf(prec, **options))
n = evalf
def subs(self, *args, **kwargs): # should mirror core.basic.subs
"""Return a new matrix with subs applied to each entry.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import SparseMatrix, Matrix
>>> SparseMatrix(1, 1, [x])
Matrix([[x]])
>>> _.subs(x, y)
Matrix([[y]])
>>> Matrix(_).subs(y, x)
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.subs(*args, **kwargs))
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""Apply core.function.expand to each entry of the matrix.
Examples
========
>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix(1, 1, [x*(x+1)])
Matrix([[x*(x + 1)]])
>>> _.expand()
Matrix([[x**2 + x]])
"""
return self.applyfunc(lambda x: x.expand(
deep, modulus, power_base, power_exp, mul, log, multinomial, basic,
**hints))
def simplify(self, ratio=1.7, measure=count_ops):
"""Apply simplify to each element of the matrix.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import sin, cos
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(1, 1, [x*sin(y)**2 + x*cos(y)**2])
Matrix([[x*sin(y)**2 + x*cos(y)**2]])
>>> _.simplify()
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.simplify(ratio, measure))
_eval_simplify = simplify
def doit(self, **kwargs):
return self
def print_nonzero(self, symb="X"):
"""Shows location of non-zero entries for fast shape lookup.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> m = Matrix(2, 3, lambda i, j: i*3+j)
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5]])
>>> m.print_nonzero()
[ XX]
[XXX]
>>> m = eye(4)
>>> m.print_nonzero("x")
[x ]
[ x ]
[ x ]
[ x]
"""
s = []
for i in range(self.rows):
line = []
for j in range(self.cols):
if self[i, j] == 0:
line.append(" ")
else:
line.append(str(symb))
s.append("[%s]" % ''.join(line))
print '\n'.join(s)
def LUsolve(self, rhs, iszerofunc=_iszero):
"""Solve the linear system Ax = rhs for x where A = self.
        This is for symbolic matrices; for real or complex ones use
        sympy.mpmath.lu_solve or sympy.mpmath.qr_solve.
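        Examples
        ========
        A small worked example (system chosen for illustration; the solution
        was verified by hand):
        >>> from sympy import Matrix
        >>> A = Matrix([[2, 1], [1, 3]])
        >>> A.LUsolve(Matrix([3, 4]))
        Matrix([
        [1],
        [1]])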
See Also
========
lower_triangular_solve
upper_triangular_solve
cholesky_solve
diagonal_solve
LDLsolve
QRsolve
LUdecomposition
"""
if rhs.rows != self.rows:
raise ShapeError("`self` and `rhs` must have the same number of rows.")
        A, perm = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
n = self.rows
b = rhs.permuteFwd(perm).as_mutable()
# forward substitution, all diag entries are scaled to 1
for i in xrange(n):
for j in xrange(i):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: x - y*scale)
# backward substitution
for i in xrange(n - 1, -1, -1):
for j in xrange(i + 1, n):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: x - y*scale)
scale = A[i, i]
b.row_op(i, lambda x, _: x/scale)
return rhs.__class__(b)
def LUdecomposition(self, iszerofunc=_iszero):
"""Returns the decomposition LU and the row swaps p.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[4, 3], [6, 3]])
>>> L, U, _ = a.LUdecomposition()
>>> L
Matrix([
[ 1, 0],
[3/2, 1]])
>>> U
Matrix([
[4, 3],
[0, -3/2]])
See Also
========
cholesky
LDLdecomposition
QRdecomposition
LUdecomposition_Simple
LUdecompositionFF
LUsolve
"""
        combined, p = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
L = self.zeros(self.rows)
U = self.zeros(self.rows)
for i in range(self.rows):
for j in range(self.rows):
if i > j:
L[i, j] = combined[i, j]
else:
if i == j:
L[i, i] = 1
U[i, j] = combined[i, j]
return L, U, p
def LUdecomposition_Simple(self, iszerofunc=_iszero):
"""Returns A comprised of L, U (L's diag entries are 1) and
p which is the list of the row swaps (in order).
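        Examples
        ========
        A minimal sketch (matrix chosen for illustration); the returned matrix
        stores L below the diagonal and U on and above it:
        >>> from sympy import Matrix
        >>> Matrix([[1, 2], [3, 4]]).LUdecomposition_Simple()
        (Matrix([
        [1,  2],
        [3, -2]]), [])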
See Also
========
LUdecomposition
LUdecompositionFF
LUsolve
"""
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to apply LUdecomposition_Simple().")
n = self.rows
A = self.as_mutable()
p = []
# factorization
for j in range(n):
for i in range(j):
for k in range(i):
A[i, j] = A[i, j] - A[i, k]*A[k, j]
pivot = -1
for i in range(j, n):
for k in range(j):
A[i, j] = A[i, j] - A[i, k]*A[k, j]
# find the first non-zero pivot, includes any expression
if pivot == -1 and not iszerofunc(A[i, j]):
pivot = i
if pivot < 0:
# this result is based on iszerofunc's analysis of the possible pivots, so even though
# the element may not be strictly zero, the supplied iszerofunc's evaluation gave True
raise ValueError("No nonzero pivot found; inversion failed.")
if pivot != j: # row must be swapped
A.row_swap(pivot, j)
p.append([pivot, j])
scale = 1 / A[j, j]
for i in range(j + 1, n):
A[i, j] = A[i, j]*scale
return A, p
def LUdecompositionFF(self):
"""Compute a fraction-free LU decomposition.
Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
If the elements of the matrix belong to some integral domain I, then all
elements of L, D and U are guaranteed to belong to I.
**Reference**
- W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new forms
for LU and QR factors". Frontiers in Computer Science in China,
Vol 2, no. 1, pp. 67-80, 2008.
See Also
========
LUdecomposition
LUdecomposition_Simple
LUsolve
"""
from sympy.matrices import SparseMatrix
zeros = SparseMatrix.zeros
eye = SparseMatrix.eye
n, m = self.rows, self.cols
U, L, P = self.as_mutable(), eye(n), eye(n)
DD = zeros(n, n)
oldpivot = 1
for k in range(n - 1):
if U[k, k] == 0:
for kpivot in range(k + 1, n):
if U[kpivot, k]:
break
else:
raise ValueError("Matrix is not full rank")
U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
L[k, k] = Ukk = U[k, k]
DD[k, k] = oldpivot*Ukk
for i in range(k + 1, n):
L[i, k] = Uik = U[i, k]
for j in range(k + 1, m):
U[i, j] = (Ukk*U[i, j] - U[k, j]*Uik) / oldpivot
U[i, k] = 0
oldpivot = Ukk
DD[n - 1, n - 1] = oldpivot
return P, L, DD, U
def cofactorMatrix(self, method="berkowitz"):
"""Return a matrix containing the cofactor of each element.
See Also
========
cofactor
minorEntry
minorMatrix
adjugate
"""
out = self._new(self.rows, self.cols, lambda i, j:
self.cofactor(i, j, method))
return out
def minorEntry(self, i, j, method="berkowitz"):
"""Calculate the minor of an element.
See Also
========
minorMatrix
cofactor
cofactorMatrix
"""
if not 0 <= i < self.rows or not 0 <= j < self.cols:
raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` " +
"(%d)" % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
return self.minorMatrix(i, j).det(method)
def minorMatrix(self, i, j):
"""Creates the minor matrix of a given element.
See Also
========
minorEntry
cofactor
cofactorMatrix
"""
if not 0 <= i < self.rows or not 0 <= j < self.cols:
raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` " +
"(%d)" % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
M = self.as_mutable()
M.row_del(i)
M.col_del(j)
return self._new(M)
def cofactor(self, i, j, method="berkowitz"):
"""Calculate the cofactor of an element.
See Also
========
cofactorMatrix
minorEntry
minorMatrix
"""
if (i + j) % 2 == 0:
return self.minorEntry(i, j, method)
else:
return -1*self.minorEntry(i, j, method)
def jacobian(self, X):
"""Calculates the Jacobian matrix (derivative of a vectorial function).
Parameters
==========
self : vector of expressions representing functions f_i(x_1, ..., x_n).
X : set of x_i's in order, it can be a list or a Matrix
Both self and X can be a row or a column matrix in any order
(i.e., jacobian() should always work).
Examples
========
>>> from sympy import sin, cos, Matrix
>>> from sympy.abc import rho, phi
>>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
>>> Y = Matrix([rho, phi])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)],
[ 2*rho, 0]])
>>> X = Matrix([rho*cos(phi), rho*sin(phi)])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)]])
See Also
========
hessian
wronskian
"""
if not isinstance(X, MatrixBase):
X = self._new(X)
# Both X and self can be a row or a column matrix, so we need to make
# sure all valid combinations work, but everything else fails:
if self.shape[0] == 1:
m = self.shape[1]
elif self.shape[1] == 1:
m = self.shape[0]
else:
raise TypeError("self must be a row or a column matrix")
if X.shape[0] == 1:
n = X.shape[1]
elif X.shape[1] == 1:
n = X.shape[0]
else:
raise TypeError("X must be a row or a column matrix")
# m is the number of functions and n is the number of variables
# computing the Jacobian is now easy:
return self._new(m, n, lambda j, i: self[j].diff(X[i]))
def QRdecomposition(self):
"""Return Q, R where A = Q*R, Q is orthogonal and R is upper triangular.
Examples
========
This is the example from wikipedia:
>>> from sympy import Matrix
>>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[ 6/7, -69/175, -58/175],
[ 3/7, 158/175, 6/175],
[-2/7, 6/35, -33/35]])
>>> R
Matrix([
[14, 21, -14],
[ 0, 175, -70],
[ 0, 0, 35]])
>>> A == Q*R
True
QR factorization of an identity matrix:
>>> A = Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> R
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
cholesky
LDLdecomposition
LUdecomposition
QRsolve
"""
cls = self.__class__
self = self.as_mutable()
if not self.rows >= self.cols:
raise MatrixError(
"The number of rows must be greater than columns")
n = self.rows
m = self.cols
rank = n
row_reduced = self.rref()[0]
for i in range(row_reduced.rows):
if row_reduced.row(i).norm() == 0:
rank -= 1
if not rank == self.cols:
raise MatrixError("The rank of the matrix must match the columns")
Q, R = self.zeros(n, m), self.zeros(m)
for j in range(m): # for each column vector
tmp = self[:, j] # take original v
for i in range(j):
                # subtract the projection of the column onto the i-th basis vector
tmp -= Q[:, i]*self[:, j].dot(Q[:, i])
tmp.expand()
# normalize it
R[j, j] = tmp.norm()
Q[:, j] = tmp / R[j, j]
if Q[:, j].norm() != 1:
raise NotImplementedError(
"Could not normalize the vector %d." % j)
for i in range(j):
R[i, j] = Q[:, i].dot(self[:, j])
return cls(Q), cls(R)
def QRsolve(self, b):
"""Solve the linear system 'Ax = b'.
'self' is the matrix 'A', the method argument is the vector
'b'. The method returns the solution vector 'x'. If 'b' is a
matrix, the system is solved for each column of 'b' and the
return value is a matrix of the same shape as 'b'.
        This method is slower (approximately by a factor of 2) but
        more stable for floating-point arithmetic than the LUsolve method.
        However, LUsolve usually works with exact arithmetic, so you
        typically do not need QRsolve.
This is mainly for educational purposes and symbolic matrices, for real
(or complex) matrices use sympy.mpmath.qr_solve.
See Also
========
lower_triangular_solve
upper_triangular_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRdecomposition
"""
Q, R = self.as_mutable().QRdecomposition()
y = Q.T*b
# back substitution to solve R*x = y:
# We build up the result "backwards" in the vector 'x' and reverse it
# only in the end.
x = []
n = R.rows
for j in range(n - 1, -1, -1):
tmp = y[j, :]
for k in range(j + 1, n):
tmp -= R[j, k]*x[n - 1 - k]
x.append(tmp / R[j, j])
return self._new([row._mat for row in reversed(x)])
def cross(self, b):
"""Calculate the cross product of ``self`` and ``b``.
See Also
========
dot
multiply
multiply_elementwise
"""
if not is_sequence(b):
raise TypeError("`b` must be an ordered iterable or Matrix, not %s." %
type(b))
if not ((self.rows == 1 and self.cols == 3 or
self.rows == 3 and self.cols == 1) and \
(b.rows == 1 and b.cols == 3 or
b.rows == 3 and b.cols == 1)):
raise ShapeError("Dimensions incorrect for cross product.")
else:
return self._new(1, 3, ((self[1]*b[2] - self[2]*b[1]),
(self[2]*b[0] - self[0]*b[2]),
(self[0]*b[1] - self[1]*b[0])))
def dot(self, b):
"""Return the dot product of Matrix self and b relaxing the condition
of compatible dimensions: if either the number of rows or columns are
the same as the length of b then the dot product is returned. If self
is a row or column vector, a scalar is returned. Otherwise, a list
of results is returned (and in that case the number of columns in self
must match the length of b).
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> v = [1, 1, 1]
>>> M.row(0).dot(v)
6
>>> M.col(0).dot(v)
12
>>> M.dot(v)
[6, 15, 24]
See Also
========
cross
multiply
multiply_elementwise
"""
from dense import Matrix
if not isinstance(b, MatrixBase):
if is_sequence(b):
if len(b) != self.cols and len(b) != self.rows:
raise ShapeError("Dimensions incorrect for dot product.")
return self.dot(Matrix(b))
else:
raise TypeError("`b` must be an ordered iterable or Matrix, not %s." %
type(b))
if self.cols == b.rows:
if b.cols != 1:
self = self.T
b = b.T
prod = flatten((self*b).tolist())
if len(prod) == 1:
return prod[0]
return prod
if self.cols == b.cols:
return self.dot(b.T)
elif self.rows == b.rows:
return self.T.dot(b)
else:
raise ShapeError("Dimensions incorrect for dot product.")
def multiply_elementwise(self, b):
"""Return the Hadamard product (elementwise product) of A and B
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> A.multiply_elementwise(B)
Matrix([
[ 0, 10, 200],
[300, 40, 5]])
See Also
========
cross
dot
multiply
"""
from sympy.matrices import matrix_multiply_elementwise
return matrix_multiply_elementwise(self, b)
def values(self):
"""Return non-zero values of self."""
return [i for i in flatten(self.tolist()) if not i.is_zero]
def norm(self, ord=None):
"""Return the Norm of a Matrix or Vector.
        In the simplest case this is the geometric size of the vector.
        Other norms can be specified by the ``ord`` parameter.
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm - does not exist
inf -- max(abs(x))
-inf -- min(abs(x))
1 -- as below
-1 -- as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other - does not exist sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
Examples
========
>>> from sympy import Matrix, Symbol, trigsimp, cos, sin, oo
>>> x = Symbol('x', real=True)
>>> v = Matrix([cos(x), sin(x)])
>>> trigsimp( v.norm() )
1
>>> v.norm(10)
(sin(x)**10 + cos(x)**10)**(1/10)
>>> A = Matrix([[1, 1], [1, 1]])
>>> A.norm(2)# Spectral norm (max of |Ax|/|x| under 2-vector-norm)
2
>>> A.norm(-2) # Inverse spectral norm (smallest singular value)
0
>>> A.norm() # Frobenius Norm
2
>>> Matrix([1, -2]).norm(oo)
2
>>> Matrix([-1, 2]).norm(-oo)
1
See Also
========
normalized
"""
# Row or Column Vector Norms
vals = self.values() or [0]
if self.rows == 1 or self.cols == 1:
if ord == 2 or ord is None: # Common case sqrt(<x, x>)
return sqrt(Add(*(abs(i)**2 for i in vals)))
elif ord == 1: # sum(abs(x))
return Add(*(abs(i) for i in vals))
elif ord == S.Infinity: # max(abs(x))
return Max(*[abs(i) for i in vals])
elif ord == S.NegativeInfinity: # min(abs(x))
return Min(*[abs(i) for i in vals])
# Otherwise generalize the 2-norm, Sum(x_i**ord)**(1/ord)
# Note that while useful this is not mathematically a norm
try:
return Pow(Add(*(abs(i)**ord for i in vals)), S(1) / ord)
except (NotImplementedError, TypeError):
raise ValueError("Expected order to be Number, Symbol, oo")
# Matrix Norms
else:
if ord == 2: # Spectral Norm
# Maximum singular value
return Max(*self.singular_values())
elif ord == -2:
# Minimum singular value
return Min(*self.singular_values())
elif (ord is None or isinstance(ord, str) and ord.lower() in
['f', 'fro', 'frobenius', 'vector']):
# Reshape as vector and send back to norm function
return self.vec().norm(ord=2)
else:
raise NotImplementedError("Matrix Norms under development")
def normalized(self):
"""Return the normalized version of ``self``.
See Also
========
norm
"""
if self.rows != 1 and self.cols != 1:
raise ShapeError("A Matrix must be a vector to normalize.")
norm = self.norm()
out = self.applyfunc(lambda i: i / norm)
return out
def project(self, v):
"""Return the projection of ``self`` onto the line containing ``v``.
Examples
========
>>> from sympy import Matrix, S, sqrt
>>> V = Matrix([sqrt(3)/2, S.Half])
>>> x = Matrix([[1, 0]])
>>> V.project(x)
Matrix([[sqrt(3)/2, 0]])
>>> V.project(-x)
Matrix([[sqrt(3)/2, 0]])
"""
return v*(self.dot(v) / v.dot(v))
def permuteBkwd(self, perm):
"""Permute the rows of the matrix with the given permutation in reverse.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permuteBkwd([[0, 1], [0, 2]])
Matrix([
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]])
See Also
========
permuteFwd
"""
copy = self.copy()
for i in range(len(perm) - 1, -1, -1):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def permuteFwd(self, perm):
"""Permute the rows of the matrix with the given permutation.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permuteFwd([[0, 1], [0, 2]])
Matrix([
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
See Also
========
permuteBkwd
"""
copy = self.copy()
for i in range(len(perm)):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def exp(self):
"""Return the exponentiation of a square matrix."""
if not self.is_square:
raise NonSquareMatrixError(
"Exponentiation is valid only for square matrices")
try:
U, D = self.diagonalize()
except MatrixError:
raise NotImplementedError("Exponentiation is implemented only for diagonalizable matrices")
for i in range(0, D.rows):
D[i, i] = C.exp(D[i, i])
return U*D*U.inv()
@property
def is_square(self):
"""Checks if a matrix is square.
A matrix is square if the number of rows equals the number of columns.
The empty matrix is square by definition, since the number of rows and
the number of columns are both zero.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[1, 2, 3], [4, 5, 6]])
>>> b = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> c = Matrix([])
>>> a.is_square
False
>>> b.is_square
True
>>> c.is_square
True
"""
return self.rows == self.cols
@property
def is_zero(self):
"""Checks if a matrix is a zero matrix.
A matrix is zero if every element is zero. A matrix need not be square
to be considered zero. The empty matrix is zero by the principle of
vacuous truth.
Examples
========
>>> from sympy import Matrix, zeros
>>> a = Matrix([[0, 0], [0, 0]])
>>> b = zeros(3, 4)
>>> c = Matrix([[0, 1], [0, 0]])
>>> d = Matrix([])
>>> a.is_zero
True
>>> b.is_zero
True
>>> c.is_zero
False
>>> d.is_zero
True
"""
return not self.values()
def is_nilpotent(self):
"""Checks if a matrix is nilpotent.
A matrix B is nilpotent if for some integer k, B**k is
a zero matrix.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
True
>>> a = Matrix([[1, 0, 1], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
False
"""
if not self.is_square:
raise NonSquareMatrixError(
"Nilpotency is valid only for square matrices")
x = Dummy('x')
if self.charpoly(x).args[0] == x**self.rows:
return True
return False
@property
def is_upper(self):
"""Check if matrix is an upper triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_upper
True
>>> m = Matrix(4, 3, [5, 1, 9, 0, 4 , 6, 0, 0, 5, 0, 0, 0])
>>> m
Matrix([
[5, 1, 9],
[0, 4, 6],
[0, 0, 5],
[0, 0, 0]])
>>> m.is_upper
True
>>> m = Matrix(2, 3, [4, 2, 5, 6, 1, 1])
>>> m
Matrix([
[4, 2, 5],
[6, 1, 1]])
>>> m.is_upper
False
See Also
========
is_lower
is_diagonal
is_upper_hessenberg
"""
return all(self[i, j].is_zero
for i in range(1, self.rows)
for j in range(i))
@property
def is_lower(self):
"""Check if matrix is a lower triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_lower
True
>>> m = Matrix(4, 3, [0, 0, 0, 2, 0, 0, 1, 4 , 0, 6, 6, 5])
>>> m
Matrix([
[0, 0, 0],
[2, 0, 0],
[1, 4, 0],
[6, 6, 5]])
>>> m.is_lower
True
>>> from sympy.abc import x, y
>>> m = Matrix(2, 2, [x**2 + y, y**2 + x, 0, x + y])
>>> m
Matrix([
[x**2 + y, x + y**2],
[ 0, x + y]])
>>> m.is_lower
False
See Also
========
is_upper
is_diagonal
is_lower_hessenberg
"""
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 1, self.cols))
@property
def is_upper_hessenberg(self):
"""Checks if the matrix is the upper-Hessenberg form.
The upper hessenberg matrix has zero entries
below the first subdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 4, 2, 3], [3, 4, 1, 7], [0, 2, 3, 4], [0, 0, 1, 3]])
>>> a
Matrix([
[1, 4, 2, 3],
[3, 4, 1, 7],
[0, 2, 3, 4],
[0, 0, 1, 3]])
>>> a.is_upper_hessenberg
True
See Also
========
is_lower_hessenberg
is_upper
"""
return all(self[i, j].is_zero
for i in range(2, self.rows)
for j in range(i - 1))
@property
def is_lower_hessenberg(self):
r"""Checks if the matrix is in the lower-Hessenberg form.
The lower hessenberg matrix has zero entries
above the first superdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 2, 0, 0], [5, 2, 3, 0], [3, 4, 3, 7], [5, 6, 1, 1]])
>>> a
Matrix([
[1, 2, 0, 0],
[5, 2, 3, 0],
[3, 4, 3, 7],
[5, 6, 1, 1]])
>>> a.is_lower_hessenberg
True
See Also
========
is_upper_hessenberg
is_lower
"""
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 2, self.cols))
def is_symbolic(self):
"""Checks if any elements contain Symbols.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.is_symbolic()
True
"""
return any(element.has(Symbol) for element in self.values())
def is_symmetric(self, simplify=True):
"""Check if matrix is symmetric matrix,
that is square matrix and is equal to its transpose.
By default, simplifications occur before testing symmetry.
They can be skipped using 'simplify=False'; while speeding things a bit,
this may however induce false negatives.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [0, 1, 1, 2])
>>> m
Matrix([
[0, 1],
[1, 2]])
>>> m.is_symmetric()
True
>>> m = Matrix(2, 2, [0, 1, 2, 0])
>>> m
Matrix([
[0, 1],
[2, 0]])
>>> m.is_symmetric()
False
>>> m = Matrix(2, 3, [0, 0, 0, 0, 0, 0])
>>> m
Matrix([
[0, 0, 0],
[0, 0, 0]])
>>> m.is_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2 , 2, 0, y, 0, 3])
>>> m
Matrix([
[ 1, x**2 + 2*x + 1, y],
[(x + 1)**2, 2, 0],
[ y, 0, 3]])
>>> m.is_symmetric()
True
        If the matrix is already simplified, you may speed up the
        is_symmetric() test by using 'simplify=False'.
>>> m.is_symmetric(simplify=False)
False
>>> m1 = m.expand()
>>> m1.is_symmetric(simplify=False)
True
"""
if not self.is_square:
return False
if simplify:
delta = self - self.transpose()
delta.simplify()
return delta.equals(self.zeros(self.rows, self.cols))
else:
return self == self.transpose()
def is_anti_symmetric(self, simplify=True):
"""Check if matrix M is an antisymmetric matrix,
that is, M is a square matrix with all M[i, j] == -M[j, i].
When ``simplify=True`` (default), the sum M[i, j] + M[j, i] is
simplified before testing to see if it is zero. By default,
the SymPy simplify function is used. To use a custom function
set simplify to a function that accepts a single argument which
returns a simplified expression. To skip simplification, set
simplify to False but note that although this will be faster,
it may induce false negatives.
Examples
========
>>> from sympy import Matrix, symbols
>>> m = Matrix(2, 2, [0, 1, -1, 0])
>>> m
Matrix([
[ 0, 1],
[-1, 0]])
>>> m.is_anti_symmetric()
True
>>> x, y = symbols('x y')
>>> m = Matrix(2, 3, [0, 0, x, -y, 0, 0])
>>> m
Matrix([
[ 0, 0, x],
[-y, 0, 0]])
>>> m.is_anti_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [0, x**2 + 2*x + 1, y,
... -(x + 1)**2 , 0, x*y,
... -y, -x*y, 0])
Simplification of matrix elements is done by default so even
though two elements which should be equal and opposite wouldn't
pass an equality test, the matrix is still reported as
anti-symmetric:
>>> m[0, 1] == -m[1, 0]
False
>>> m.is_anti_symmetric()
True
If 'simplify=False' is used for the case when a Matrix is already
simplified, this will speed things up. Here, we see that without
simplification the matrix does not appear anti-symmetric:
>>> m.is_anti_symmetric(simplify=False)
False
But if the matrix were already expanded, then it would appear
anti-symmetric and simplification in the is_anti_symmetric routine
is not needed:
>>> m = m.expand()
>>> m.is_anti_symmetric(simplify=False)
True
"""
# accept custom simplification
simpfunc = simplify if isinstance(simplify, FunctionType) else \
_simplify if simplify else False
if not self.is_square:
return False
n = self.rows
if simplify:
for i in range(n):
# diagonal
if not simpfunc(self[i, i]).is_zero:
return False
# others
for j in range(i + 1, n):
diff = self[i, j] + self[j, i]
if not simpfunc(diff).is_zero:
return False
return True
else:
for i in range(n):
for j in range(i, n):
if self[i, j] != -self[j, i]:
return False
return True
def is_diagonal(self):
"""Check if matrix is diagonal,
that is matrix in which the entries outside the main diagonal are all zero.
Examples
========
>>> from sympy import Matrix, diag
>>> m = Matrix(2, 2, [1, 0, 0, 2])
>>> m
Matrix([
[1, 0],
[0, 2]])
>>> m.is_diagonal()
True
>>> m = Matrix(2, 2, [1, 1, 0, 2])
>>> m
Matrix([
[1, 1],
[0, 2]])
>>> m.is_diagonal()
False
>>> m = diag(1, 2, 3)
>>> m
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> m.is_diagonal()
True
See Also
========
is_lower
is_upper
is_diagonalizable
diagonalize
"""
for i in range(self.rows):
for j in range(self.cols):
if i != j and self[i, j]:
return False
return True
def det(self, method="bareis"):
"""Computes the matrix determinant using the method "method".
        Possible values for "method":
          bareis ... det_bareis
          berkowitz ... berkowitz_det
          det_LU ... det_LU_decomposition
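        Examples
        ========
        A quick example (matrix chosen for illustration):
        >>> from sympy import Matrix
        >>> Matrix([[1, 2], [3, 4]]).det()
        -2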
See Also
========
det_bareis
berkowitz_det
det_LU
"""
# if methods were made internal and all determinant calculations
# passed through here, then these lines could be factored out of
# the method routines
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
if method == "bareis":
return self.det_bareis()
elif method == "berkowitz":
return self.berkowitz_det()
elif method == "det_LU":
return self.det_LU_decomposition()
else:
raise ValueError("Determinant method '%s' unrecognized" % method)
def det_bareis(self):
"""Compute matrix determinant using Bareis' fraction-free
algorithm which is an extension of the well known Gaussian
elimination method. This approach is best suited for dense
        symbolic matrices and will result in a determinant with
        a minimal number of fractions, which means that less term
        rewriting is needed on the resulting formulae.
TODO: Implement algorithm for sparse matrices (SFF),
http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
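        Examples
        ========
        For example (matrix chosen for illustration):
        >>> from sympy import Matrix
        >>> Matrix([[1, 2], [3, 4]]).det_bareis()
        -2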
See Also
========
det
berkowitz_det
"""
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
M, n = self.copy(), self.rows
if n == 1:
det = M[0, 0]
elif n == 2:
det = M[0, 0]*M[1, 1] - M[0, 1]*M[1, 0]
elif n == 3:
det = (M[0, 0]*M[1, 1]*M[2, 2] + M[0, 1]*M[1, 2]*M[2, 0] + M[0, 2]*M[1, 0]*M[2, 1]) - \
(M[0, 2]*M[1, 1]*M[2, 0] + M[0, 0]*M[1, 2]*M[2, 1] + M[0, 1]*M[1, 0]*M[2, 2])
else:
sign = 1 # track current sign in case of column swap
for k in range(n - 1):
# look for a pivot in the current column
# and assume det == 0 if none is found
if M[k, k] == 0:
for i in range(k + 1, n):
if M[i, k]:
M.row_swap(i, k)
sign *= -1
break
else:
return S.Zero
# proceed with Bareis' fraction-free (FF)
# form of Gaussian elimination algorithm
for i in range(k + 1, n):
for j in range(k + 1, n):
D = M[k, k]*M[i, j] - M[i, k]*M[k, j]
if k > 0:
D /= M[k - 1, k - 1]
if D.is_Atom:
M[i, j] = D
else:
M[i, j] = cancel(D)
det = sign*M[n - 1, n - 1]
return det.expand()
def det_LU_decomposition(self):
"""Compute matrix determinant using LU decomposition
Note that this method fails if the LU decomposition itself
fails. In particular, if the matrix has no inverse this method
will fail.
TODO: Implement algorithm for sparse matrices (SFF),
http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
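        Examples
        ========
        For example (matrix chosen for illustration):
        >>> from sympy import Matrix
        >>> Matrix([[1, 2], [3, 4]]).det_LU_decomposition()
        -2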
See Also
========
det
det_bareis
berkowitz_det
"""
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
M, n = self.copy(), self.rows
p, prod = [], 1
l, u, p = M.LUdecomposition()
if len(p) % 2:
prod = -1
for k in range(n):
prod = prod*u[k, k]*l[k, k]
return prod.expand()
def adjugate(self, method="berkowitz"):
"""Returns the adjugate matrix.
        The adjugate matrix is the transpose of the cofactor matrix.
http://en.wikipedia.org/wiki/Adjugate
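        Examples
        ========
        An illustrative example (matrix chosen here); note that it is the
        transpose of the matrix returned by cofactorMatrix:
        >>> from sympy import Matrix
        >>> Matrix([[1, 2], [3, 4]]).adjugate()
        Matrix([
        [ 4, -2],
        [-3,  1]])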
See Also
========
cofactorMatrix
transpose
berkowitz
"""
return self.cofactorMatrix(method).T
def inverse_LU(self, iszerofunc=_iszero):
"""Calculates the inverse using LU decomposition.
See Also
========
inv
inverse_GE
inverse_ADJ
"""
if not self.is_square:
raise NonSquareMatrixError()
ok = self.rref(simplify=True)[0]
if any(iszerofunc(ok[j, j]) for j in range(ok.rows)):
raise ValueError("Matrix det == 0; not invertible.")
        return self.LUsolve(self.eye(self.rows), iszerofunc=iszerofunc)
def inverse_GE(self, iszerofunc=_iszero):
"""Calculates the inverse using Gaussian elimination.
See Also
========
inv
inverse_LU
inverse_ADJ
"""
from dense import Matrix
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
big = Matrix.hstack(self.as_mutable(), Matrix.eye(self.rows))
red = big.rref(iszerofunc=iszerofunc, simplify=True)[0]
if any(iszerofunc(red[j, j]) for j in range(red.rows)):
raise ValueError("Matrix det == 0; not invertible.")
return self._new(red[:, big.rows:])
def inverse_ADJ(self, iszerofunc=_iszero):
"""Calculates the inverse using the adjugate matrix and a determinant.
See Also
========
inv
inverse_LU
inverse_GE
"""
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
d = self.berkowitz_det()
zero = d.equals(0)
if zero is None:
# if equals() can't decide, will rref be able to?
ok = self.rref(simplify=True)[0]
zero = any(iszerofunc(ok[j, j]) for j in range(ok.rows))
if zero:
raise ValueError("Matrix det == 0; not invertible.")
return self.adjugate() / d
def rref(self, simplified=False, iszerofunc=_iszero,
simplify=False):
"""Return reduced row-echelon form of matrix and indices of pivot vars.
To simplify elements before finding nonzero pivots set simplify=True
(to use the default SymPy simplify function) or pass a custom
simplify function.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> m = Matrix([[1, 2], [x, 1 - 1/x]])
>>> m.rref()
(Matrix([
[1, 0],
[0, 1]]), [0, 1])
"""
if simplified is not False:
SymPyDeprecationWarning(
feature="'simplified' as a keyword to rref",
useinstead="simplify=True, or set simplify equal to your "
"own custom simplification function",
issue=3382, deprecated_since_version="0.7.2",
).warn()
simplify = simplify or True
simpfunc = simplify if isinstance(
simplify, FunctionType) else _simplify
# pivot: index of next row to contain a pivot
pivot, r = 0, self.as_mutable()
# pivotlist: indices of pivot variables (non-free)
pivotlist = []
for i in xrange(r.cols):
if pivot == r.rows:
break
if simplify:
r[pivot, i] = simpfunc(r[pivot, i])
if iszerofunc(r[pivot, i]):
for k in xrange(pivot, r.rows):
if simplify and k > pivot:
r[k, i] = simpfunc(r[k, i])
if not iszerofunc(r[k, i]):
break
if k == r.rows - 1 and iszerofunc(r[k, i]):
continue
r.row_swap(pivot, k)
scale = r[pivot, i]
r.row_op(pivot, lambda x, _: x / scale)
for j in xrange(r.rows):
if j == pivot:
continue
scale = r[j, i]
r.zip_row_op(j, pivot, lambda x, y: x - scale*y)
pivotlist.append(i)
pivot += 1
return self._new(r), pivotlist
def rank(self, simplified=False, iszerofunc=_iszero,
simplify=False):
"""
Returns the rank of a matrix
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> m = Matrix([[1, 2], [x, 1 - 1/x]])
>>> m.rank()
2
>>> n = Matrix(3, 3, range(1, 10))
>>> n.rank()
2
"""
row_reduced = self.rref(simplified=simplified, iszerofunc=iszerofunc, simplify=simplify)
rank = len(row_reduced[-1])
return rank
def nullspace(self, simplified=False, simplify=False):
"""Returns list of vectors (Matrix objects) that span nullspace of self
"""
from sympy.matrices import zeros
if simplified is not False:
SymPyDeprecationWarning(
feature="'simplified' as a keyword to nullspace",
useinstead="simplify=True, or set simplify equal to your "
"own custom simplification function",
issue=3382, deprecated_since_version="0.7.2",
).warn()
simplify = simplify or True
simpfunc = simplify if isinstance(
simplify, FunctionType) else _simplify
reduced, pivots = self.rref(simplify=simpfunc)
basis = []
# create a set of vectors for the basis
for i in range(self.cols - len(pivots)):
basis.append(zeros(self.cols, 1))
# contains the variable index to which the vector corresponds
basiskey, cur = [-1]*len(basis), 0
for i in range(self.cols):
if i not in pivots:
basiskey[cur] = i
cur += 1
for i in range(self.cols):
if i not in pivots: # free var, just set vector's ith place to 1
basis[basiskey.index(i)][i, 0] = 1
else: # add negative of nonpivot entry to corr vector
for j in range(i + 1, self.cols):
line = pivots.index(i)
v = reduced[line, j]
if simplify:
v = simpfunc(v)
if v:
if j in pivots:
# XXX: Is this the correct error?
raise NotImplementedError(
"Could not compute the nullspace of `self`.")
basis[basiskey.index(j)][i, 0] = -v
return [self._new(b) for b in basis]
def berkowitz(self):
"""The Berkowitz algorithm.
        Given an N x N matrix with symbolic content, efficiently compute
        coefficients of the characteristic polynomials of 'self' and of all
        its square sub-matrices obtained by removing both the i-th row
        and column, without division in the ground domain.
        This method is particularly useful for computing the determinant,
        principal minors and characteristic polynomial when 'self'
        has complicated coefficients, e.g. polynomials. Semi-direct
        usage of this algorithm is also important for efficiently
        computing sub-resultant PRS.
        Assuming that M is a square matrix of dimension N x N and
        I is the N x N identity matrix, the following definition
        of the characteristic polynomial is used:
            charpoly(M) = det(t*I - M)
        As a consequence, all polynomials generated by the Berkowitz
        algorithm are monic.
        Examples
        ========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> M = Matrix([[x, y, z], [1, 0, 0], [y, z, x]])
>>> p, q, r = M.berkowitz()
>>> p # 1 x 1 M's sub-matrix
(1, -x)
>>> q # 2 x 2 M's sub-matrix
(1, -x, -y)
>>> r # 3 x 3 M's sub-matrix
(1, -2*x, x**2 - y*z - y, x*y - z**2)
For more information on the implemented algorithm refer to:
[1] S.J. Berkowitz, On computing the determinant in small
parallel time using a small number of processors, ACM,
Information Processing Letters 18, 1984, pp. 147-150
[2] M. Keber, Division-Free computation of sub-resultants
using Bezout matrices, Tech. Report MPI-I-2006-1-006,
Saarbrucken, 2006
See Also
========
berkowitz_det
berkowitz_minors
berkowitz_charpoly
berkowitz_eigenvals
"""
from sympy.matrices import zeros
if not self.is_square:
raise NonSquareMatrixError()
A, N = self, self.rows
transforms = [0]*(N - 1)
for n in range(N, 1, -1):
T, k = zeros(n + 1, n), n - 1
R, C = -A[k, :k], A[:k, k]
A, a = A[:k, :k], -A[k, k]
items = [C]
for i in range(0, n - 2):
items.append(A*items[i])
for i, B in enumerate(items):
items[i] = (R*B)[0, 0]
items = [S.One, a] + items
for i in range(n):
T[i:, i] = items[:n - i + 1]
transforms[k - 1] = T
polys = [self._new([S.One, -A[0, 0]])]
for i, T in enumerate(transforms):
polys.append(T*polys[i])
return tuple(map(tuple, polys))
def berkowitz_det(self):
"""Computes determinant using Berkowitz method.
See Also
========
det
berkowitz
"""
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
poly = self.berkowitz()[-1]
sign = (-1)**(len(poly) - 1)
return sign*poly[-1]
def berkowitz_minors(self):
"""Computes principal minors using Berkowitz method.
See Also
========
berkowitz
"""
sign, minors = S.NegativeOne, []
for poly in self.berkowitz():
minors.append(sign*poly[-1])
sign = -sign
return tuple(minors)
def berkowitz_charpoly(self, x=Dummy('lambda'), simplify=_simplify):
"""Computes characteristic polynomial minors using Berkowitz method.
A PurePoly is returned so using different variables for ``x`` does
not affect the comparison or the polynomials:
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y
>>> A = Matrix([[1, 3], [2, 0]])
>>> A.berkowitz_charpoly(x) == A.berkowitz_charpoly(y)
True
Specifying ``x`` is optional; a Dummy with name ``lambda`` is used by
default (which looks good when pretty-printed in unicode):
>>> A.berkowitz_charpoly().as_expr()
_lambda**2 - _lambda - 6
No test is done to see that ``x`` doesn't clash with an existing
symbol, so using the default (``lambda``) or your own Dummy symbol is
the safest option:
>>> A = Matrix([[1, 2], [x, 0]])
>>> A.charpoly().as_expr()
_lambda**2 - _lambda - 2*x
>>> A.charpoly(x).as_expr()
x**2 - 3*x
See Also
========
berkowitz
"""
return PurePoly(map(simplify, self.berkowitz()[-1]), x)
charpoly = berkowitz_charpoly
def berkowitz_eigenvals(self, **flags):
"""Computes eigenvalues of a Matrix using Berkowitz method.
See Also
========
berkowitz
"""
return roots(self.berkowitz_charpoly(Dummy('x')), **flags)
def eigenvals(self, **flags):
"""Return eigen values using the berkowitz_eigenvals routine.
Since the roots routine doesn't always work well with Floats,
they will be replaced with Rationals before calling that
routine. If this is not desired, set flag ``rational`` to False.
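        Examples
        ========
        A minimal example (matrix chosen for illustration; comparing against
        a plain dict avoids depending on dict ordering):
        >>> from sympy import Matrix
        >>> Matrix([[2, 0], [0, 3]]).eigenvals() == {2: 1, 3: 1}
        True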
"""
# roots doesn't like Floats, so replace them with Rationals
# unless the nsimplify flag indicates that this has already
# been done, e.g. in eigenvects
if flags.pop('rational', True):
if any(v.has(Float) for v in self):
self = self._new(self.rows, self.cols,
[nsimplify(v, rational=True) for v in self])
flags.pop('simplify', None) # pop unsupported flag
return self.berkowitz_eigenvals(**flags)
def eigenvects(self, **flags):
"""Return list of triples (eigenval, multiplicity, basis).
The flag ``simplify`` has two effects:
1) if bool(simplify) is True, as_content_primitive()
will be used to tidy up normalization artifacts;
2) if nullspace needs simplification to compute the
basis, the simplify flag will be passed on to the
nullspace routine which will interpret it there.
If the matrix contains any Floats, they will be changed to Rationals
for computation purposes, but the answers will be returned after being
        evaluated with evalf. If it is desired to remove small imaginary
portions during the evalf step, pass a value for the ``chop`` flag.
"""
from sympy.matrices import eye
simplify = flags.get('simplify', True)
primitive = bool(flags.get('simplify', False))
chop = flags.pop('chop', False)
flags.pop('multiple', None) # remove this if it's there
# roots doesn't like Floats, so replace them with Rationals
float = False
if any(v.has(Float) for v in self):
float = True
self = self._new(self.rows, self.cols, [nsimplify(
v, rational=True) for v in self])
flags['rational'] = False # to tell eigenvals not to do this
out, vlist = [], self.eigenvals(**flags)
vlist = vlist.items()
vlist.sort(key=default_sort_key)
flags.pop('rational', None)
for r, k in vlist:
tmp = self.as_mutable() - eye(self.rows)*r
basis = tmp.nullspace()
# whether tmp.is_symbolic() is True or False, it is possible that
# the basis will come back as [] in which case simplification is
# necessary.
if not basis:
# The nullspace routine failed, try it again with simplification
basis = tmp.nullspace(simplify=simplify)
if not basis:
raise NotImplementedError(
"Can't evaluate eigenvector for eigenvalue %s" % r)
if primitive:
# the relationship A*e = lambda*e will still hold if we change the
# eigenvector; so if simplify is True we tidy up any normalization
                # artifacts with as_content_primitive (default) and remove any pure Integer
# denominators.
l = 1
for i, b in enumerate(basis[0]):
c, p = signsimp(b).as_content_primitive()
if c is not S.One:
b = c*p
l = ilcm(l, c.q)
basis[0][i] = b
if l != 1:
basis[0] *= l
if float:
out.append((r.evalf(chop=chop), k, [
self._new(b).evalf(chop=chop) for b in basis]))
else:
out.append((r, k, [self._new(b) for b in basis]))
return out
def singular_values(self):
"""Compute the singular values of a Matrix
Examples
========
>>> from sympy import Matrix, Symbol
>>> x = Symbol('x', real=True)
>>> A = Matrix([[0, 1, 0], [0, x, 0], [-1, 0, 0]])
>>> A.singular_values()
[sqrt(x**2 + 1), 1, 0]
See Also
========
condition_number
"""
self = self.as_mutable()
# Compute eigenvalues of A.H A
valmultpairs = (self.H*self).eigenvals()
# Expands result from eigenvals into a simple list
vals = []
for k, v in valmultpairs.items():
vals += [sqrt(k)]*v # dangerous! same k in several spots!
# sort them in descending order
vals.sort(reverse=True, key=default_sort_key)
return vals
def condition_number(self):
"""Returns the condition number of a matrix.
        This is the maximum singular value divided by the minimum singular value.
Examples
========
>>> from sympy import Matrix, S
>>> A = Matrix([[1, 0, 0], [0, 10, 0], [0, 0, S.One/10]])
>>> A.condition_number()
100
See Also
========
singular_values
"""
singularvalues = self.singular_values()
return Max(*singularvalues) / Min(*singularvalues)
def __getattr__(self, attr):
if attr in ('diff', 'integrate', 'limit'):
def doit(*args):
item_doit = lambda item: getattr(item, attr)(*args)
return self.applyfunc(item_doit)
return doit
else:
raise AttributeError(
"%s has no attribute %s." % (self.__class__.__name__, attr))
def integrate(self, *args):
"""Integrate each element of the matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.integrate((x, ))
Matrix([
[x**2/2, x*y],
[ x, 0]])
>>> M.integrate((x, 0, 2))
Matrix([
[2, 2*y],
[2, 0]])
See Also
========
limit
diff
"""
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].integrate(*args))
def limit(self, *args):
"""Calculate the limit of each element in the matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.limit(x, 2)
Matrix([
[2, y],
[1, 0]])
See Also
========
integrate
diff
"""
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].limit(*args))
def diff(self, *args):
"""Calculate the derivative of each element in the matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.diff(x)
Matrix([
[1, 0],
[0, 0]])
See Also
========
integrate
limit
"""
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].diff(*args))
def vec(self):
"""Return the Matrix converted into a one column matrix by stacking columns
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 3], [2, 4]])
>>> m
Matrix([
[1, 3],
[2, 4]])
>>> m.vec()
Matrix([
[1],
[2],
[3],
[4]])
See Also
========
vech
"""
return self.T.reshape(len(self), 1)
def vech(self, diagonal=True, check_symmetry=True):
"""Return the unique elements of a symmetric Matrix as a one column matrix
by stacking the elements in the lower triangle.
Arguments:
diagonal -- include the diagonal cells of self or not
check_symmetry -- checks symmetry of self but not completely reliably
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 2], [2, 3]])
>>> m
Matrix([
[1, 2],
[2, 3]])
>>> m.vech()
Matrix([
[1],
[2],
[3]])
>>> m.vech(diagonal=False)
Matrix([[2]])
See Also
========
vec
"""
from sympy.matrices import zeros
c = self.cols
if c != self.rows:
raise ShapeError("Matrix must be square")
if check_symmetry:
self.simplify()
if self != self.transpose():
raise ValueError("Matrix appears to be asymmetric; consider check_symmetry=False")
count = 0
if diagonal:
v = zeros(c*(c + 1) // 2, 1)
for j in range(c):
for i in range(j, c):
v[count] = self[i, j]
count += 1
else:
v = zeros(c*(c - 1) // 2, 1)
for j in range(c):
for i in range(j + 1, c):
v[count] = self[i, j]
count += 1
return v
def get_diag_blocks(self):
"""Obtains the square sub-matrices on the main diagonal of a square matrix.
Useful for inverting symbolic matrices or solving systems of
linear equations which may be decoupled by having a block diagonal
structure.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
>>> a1, a2, a3 = A.get_diag_blocks()
>>> a1
Matrix([
[1, 3],
[y, z**2]])
>>> a2
Matrix([[x]])
>>> a3
Matrix([[0]])
"""
sub_blocks = []
def recurse_sub_blocks(M):
i = 1
while i <= M.shape[0]:
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[:i, i:]
to_the_bottom = M[i:, :i]
if any(to_the_right) or any(to_the_bottom):
i += 1
continue
else:
sub_blocks.append(M[:i, :i])
if M.shape == M[:i, :i].shape:
return
else:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
def diagonalize(self, reals_only=False, sort=False, normalize=False):
"""
Return (P, D), where D is diagonal and
D = P^-1 * M * P
where M is current matrix.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> m
Matrix([
[1, 2, 0],
[0, 3, 0],
[2, -4, 2]])
>>> (P, D) = m.diagonalize()
>>> D
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> P
Matrix([
[-1, 0, -1],
[ 0, 0, -1],
[ 2, 1, 2]])
>>> P.inv() * m * P
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
See Also
========
is_diagonal
is_diagonalizable
"""
from sympy.matrices import diag
if not self.is_square:
raise NonSquareMatrixError()
if not self.is_diagonalizable(reals_only, False):
self._diagonalize_clear_subproducts()
raise MatrixError("Matrix is not diagonalizable")
else:
if self._eigenvects is None:
self._eigenvects = self.eigenvects(simplify=True)
if sort:
self._eigenvects.sort(key=default_sort_key)
self._eigenvects.reverse()
diagvals = []
P = self._new(self.rows, 0, [])
for eigenval, multiplicity, vects in self._eigenvects:
for k in range(multiplicity):
diagvals.append(eigenval)
vec = vects[k]
if normalize:
vec = vec / vec.norm()
P = P.col_insert(P.cols, vec)
D = diag(*diagvals)
self._diagonalize_clear_subproducts()
return (P, D)
def is_diagonalizable(self, reals_only=False, clear_subproducts=True):
"""Check if matrix is diagonalizable.
        If reals_only==True, then check that the diagonalized matrix
        consists only of real values.
        Some subproducts could be reused by other methods to avoid
        double calculations; by default (if clear_subproducts==True)
        they will be deleted.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> m
Matrix([
[1, 2, 0],
[0, 3, 0],
[2, -4, 2]])
>>> m.is_diagonalizable()
True
>>> m = Matrix(2, 2, [0, 1, 0, 0])
>>> m
Matrix([
[0, 1],
[0, 0]])
>>> m.is_diagonalizable()
False
>>> m = Matrix(2, 2, [0, 1, -1, 0])
>>> m
Matrix([
[ 0, 1],
[-1, 0]])
>>> m.is_diagonalizable()
True
>>> m.is_diagonalizable(True)
False
See Also
========
is_diagonal
diagonalize
"""
if not self.is_square:
return False
res = False
self._is_symbolic = self.is_symbolic()
self._is_symmetric = self.is_symmetric()
self._eigenvects = None
#if self._is_symbolic:
# self._diagonalize_clear_subproducts()
# raise NotImplementedError("Symbolic matrices are not implemented for diagonalization yet")
self._eigenvects = self.eigenvects(simplify=True)
all_iscorrect = True
for eigenval, multiplicity, vects in self._eigenvects:
if len(vects) != multiplicity:
all_iscorrect = False
break
elif reals_only and not eigenval.is_real:
all_iscorrect = False
break
res = all_iscorrect
if clear_subproducts:
self._diagonalize_clear_subproducts()
return res
def _diagonalize_clear_subproducts(self):
del self._is_symbolic
del self._is_symmetric
del self._eigenvects
def jordan_form(self, calc_transformation=True):
"""Return Jordan form J of current matrix.
If calc_transformation is specified as False, then transformation P such that
J = P^-1 * M * P
will not be calculated.
Notes
=====
Calculation of transformation P is not implemented yet.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix([
... [ 6, 5, -2, -3],
... [-3, -1, 3, 3],
... [ 2, 1, -2, -3],
... [-1, 1, 5, 5]])
>>> (P, J) = m.jordan_form()
>>> J
Matrix([
[2, 1, 0, 0],
[0, 2, 0, 0],
[0, 0, 2, 1],
[0, 0, 0, 2]])
See Also
========
jordan_cells
"""
from sympy.matrices import diag
(P, Jcells) = self.jordan_cells(calc_transformation)
J = diag(*Jcells)
return (P, J)
def jordan_cells(self, calc_transformation=True):
"""Return a list of Jordan cells of current matrix.
This list shape Jordan matrix J.
If calc_transformation is specified as False, then transformation P such that
J = P^-1 * M * P
will not be calculated.
Notes
=====
Calculation of transformation P is not implemented yet.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(4, 4, [
... 6, 5, -2, -3,
... -3, -1, 3, 3,
... 2, 1, -2, -3,
... -1, 1, 5, 5])
>>> (P, Jcells) = m.jordan_cells()
>>> Jcells[0]
Matrix([
[2, 1],
[0, 2]])
>>> Jcells[1]
Matrix([
[2, 1],
[0, 2]])
See Also
========
jordan_form
"""
from sympy.matrices import jordan_cell, diag
if not self.is_square:
raise NonSquareMatrixError()
_eigenvects = self.eigenvects()
Jcells = []
for eigenval, multiplicity, vects in _eigenvects:
geometrical = len(vects)
if geometrical == multiplicity:
Jcell = diag(*([eigenval]*multiplicity))
Jcells.append(Jcell)
else:
sizes = self._jordan_split(multiplicity, geometrical)
cells = []
for size in sizes:
cell = jordan_cell(eigenval, size)
cells.append(cell)
Jcells += cells
return (None, Jcells)
def _jordan_split(self, algebraical, geometrical):
"""Return a list of integers with sum equal to 'algebraical'
and length equal to 'geometrical'"""
n1 = algebraical // geometrical
res = [n1]*geometrical
res[len(res) - 1] += algebraical % geometrical
assert sum(res) == algebraical
return res
def has(self, *patterns):
"""Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import Matrix, Float
>>> from sympy.abc import x, y
>>> A = Matrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
"""
return any(a.has(*patterns) for a in self._mat)
def dual(self):
"""Returns the dual of a matrix, which is:
`(1/2)*levicivita(i, j, k, l)*M(k, l)` summed over indices `k` and `l`
Since the levicivita method is anti_symmetric for any pairwise
exchange of indices, the dual of a symmetric matrix is the zero
matrix. Strictly speaking the dual defined here assumes that the
'matrix' `M` is a contravariant anti_symmetric second rank tensor,
so that the dual is a covariant second rank tensor.
"""
from sympy import LeviCivita
from sympy.matrices import zeros
M, n = self[:, :], self.rows
work = zeros(n)
if self.is_symmetric():
return work
for i in range(1, n):
for j in range(1, n):
acum = 0
for k in range(1, n):
acum += LeviCivita(i, j, 0, k)*M[0, k]
work[i, j] = acum
work[j, i] = -acum
for l in range(1, n):
acum = 0
for a in range(1, n):
for b in range(1, n):
acum += LeviCivita(0, l, a, b)*M[a, b]
acum /= 2
work[0, l] = -acum
work[l, 0] = acum
return work
@classmethod
def hstack(cls, *args):
"""Return a matrix formed by joining args horizontally (i.e.
by repeated application of row_join).
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> Matrix.hstack(eye(2), 2*eye(2))
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2]])
"""
return reduce(cls.row_join, args)
@classmethod
def vstack(cls, *args):
"""Return a matrix formed by joining args vertically (i.e.
by repeated application of col_join).
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> Matrix.vstack(eye(2), 2*eye(2))
Matrix([
[1, 0],
[0, 1],
[2, 0],
[0, 2]])
"""
return reduce(cls.col_join, args)
def row_join(self, rhs):
"""Concatenates two matrices along self's last and rhs's first column
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.row_join(V)
Matrix([
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1]])
See Also
========
row
col_join
"""
if self.rows != rhs.rows:
raise ShapeError(
"`self` and `rhs` must have the same number of rows.")
newmat = self.zeros(self.rows, self.cols + rhs.cols)
newmat[:, :self.cols] = self
newmat[:, self.cols:] = rhs
return newmat
def col_join(self, bott):
"""Concatenates two matrices along self's last and bott's first row
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.col_join(V)
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[1, 1, 1]])
See Also
========
col
row_join
"""
if self.cols != bott.cols:
raise ShapeError(
"`self` and `bott` must have the same number of columns.")
newmat = self.zeros(self.rows + bott.rows, self.cols)
newmat[:self.rows, :] = self
newmat[self.rows:, :] = bott
return newmat
def row_insert(self, pos, mti):
"""Insert one or more rows at the given row position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.row_insert(1, V)
Matrix([
[0, 0, 0],
[1, 1, 1],
[0, 0, 0],
[0, 0, 0]])
See Also
========
row
col_insert
"""
if pos == 0:
return mti.col_join(self)
elif pos < 0:
pos = self.rows + pos
if pos < 0:
pos = 0
elif pos > self.rows:
pos = self.rows
if self.cols != mti.cols:
raise ShapeError(
"`self` and `mti` must have the same number of columns.")
newmat = self.zeros(self.rows + mti.rows, self.cols)
i, j = pos, pos + mti.rows
newmat[:i, :] = self[:i, :]
newmat[i: j, :] = mti
newmat[j:, :] = self[i:, :]
return newmat
def col_insert(self, pos, mti):
"""Insert one or more columns at the given column position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.col_insert(1, V)
Matrix([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]])
See Also
========
col
row_insert
"""
if pos == 0:
return mti.row_join(self)
elif pos < 0:
pos = self.cols + pos
if pos < 0:
pos = 0
elif pos > self.cols:
pos = self.cols
if self.rows != mti.rows:
raise ShapeError("self and mti must have the same number of rows.")
newmat = self.zeros(self.rows, self.cols + mti.cols)
i, j = pos, pos + mti.cols
newmat[:, :i] = self[:, :i]
newmat[:, i:j] = mti
newmat[:, j:] = self[:, i:]
return newmat
def replace(self, F, G, map=False):
"""Replaces Function F in Matrix entries with Function G.
Examples
========
>>> from sympy import symbols, Function, Matrix
>>> F, G = symbols('F, G', cls=Function)
>>> M = Matrix(2, 2, lambda i, j: F(i+j)) ; M
Matrix([
[F(0), F(1)],
[F(1), F(2)]])
>>> N = M.replace(F,G)
>>> N
Matrix([
[G(0), G(1)],
[G(1), G(2)]])
"""
M = self[:, :]
return M.applyfunc(lambda x: x.replace(F, G, map))
def classof(A, B):
"""
Get the type of the result when combining matrices of different types.
Currently the strategy is that immutability is contagious.
Examples
========
>>> from sympy import Matrix, ImmutableMatrix
>>> from sympy.matrices.matrices import classof
>>> M = Matrix([[1, 2], [3, 4]]) # a Mutable Matrix
>>> IM = ImmutableMatrix([[1, 2], [3, 4]])
>>> classof(M, IM)
<class 'sympy.matrices.immutable.ImmutableMatrix'>
"""
try:
if A._class_priority > B._class_priority:
return A.__class__
else:
return B.__class__
    except AttributeError:
pass
try:
import numpy
if isinstance(A, numpy.ndarray):
return B.__class__
if isinstance(B, numpy.ndarray):
return A.__class__
    except ImportError:
pass
raise TypeError("Incompatible classes %s, %s" % (A.__class__, B.__class__))
def a2idx(j, n=None):
"""Return integer after making positive and validating against n."""
if isinstance(j, slice):
return j
if type(j) is not int:
try:
j = j.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (j, ))
if n is not None:
if j < 0:
j += n
if not (j >= 0 and j < n):
raise IndexError("Index out of range: a[%s]" % (j, ))
return int(j)
| lidavidm/mathics-heroku | venv/lib/python2.7/site-packages/sympy/matrices/matrices.py | Python | gpl-3.0 | 108,816 | ["DIRAC", "Gaussian"] | 60b899481207cc9acf9b459fa0ce98d56b1cba2238f3b0495b0b269bdad06eee |
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
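    # Reference implementation: build the full pairwise distance matrix and
    # take the k smallest entries per query point; used to validate KDTree.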
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
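# A minimal sketch (hypothetical helper, not part of the original tests)
# making the relation tested below explicit: with the default exact
# tolerances, the tree-based estimate matches the brute-force kernel sum.
def _example_kde_agreement():
    rng = np.random.RandomState(0)
    X = rng.random_sample((50, 3))
    Y = rng.random_sample((5, 3))
    kdt = KDTree(X, leaf_size=5)
    dens_slow = compute_kernel_slow(Y, X, 'gaussian', 0.5)
    dens_tree = kdt.kernel_density(Y, h=0.5, kernel='gaussian')
    assert_allclose(dens_tree, dens_slow, rtol=1e-5)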
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
"""Compare gaussian KDE results to scipy.stats.gaussian_kde"""
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
if __name__ == '__main__':
import nose
nose.runmodule()
|
mblondel/scikit-learn
|
sklearn/neighbors/tests/test_kd_tree.py
|
Python
|
bsd-3-clause
| 7,918
|
[
"Gaussian"
] |
d3c4a2f0b6c5d975909594bc7e5138eabc4bbb3cfadbdc8a36a9c51fb5941c59
|
import numpy
import theano
from theano import tensor as T
# TODO: Covariance parameters may be too simple. Think of a way to make the matrix positive semi-definite and non-singular
class ReconstructionModel(object):
def __init__(self, ont_rep, vocab_rep, init_hyp_strengths=None, rec_model_type="gaussian"):
"""
ont_rep: Ontology rep theano shared variable
vocab_rep: Vocabulary rep theano shared variable
init_hyp_strengths: A numpy matrix of size ont_size X vocab_size, with init_hyp_strengths[concept_ind][word_ind] showing the strength of word as a hypernym of concept (needed only for multinomial reconstruction)
rec_model_type: multinomial or gaussian
"""
if rec_model_type not in ["gaussian", "multinomial"]:
            raise NotImplementedError("Unknown reconstruction model type: %s" % rec_model_type)
#numpy_rng = numpy.random.RandomState(12345)
self.rec_model_type = rec_model_type
if self.rec_model_type == "multinomial":
self.hyp_strengths = theano.shared(value=init_hyp_strengths, name='hyp_strengths')
_, word_dim = vocab_rep.get_value().shape
self.vocab_rep = vocab_rep
ont_size, _ = ont_rep.get_value().shape
self.ont_rep = ont_rep
#avg_range = 4 * numpy.sqrt(6. / (ont_size + word_dim))
#init_avgs = numpy.asarray(numpy_rng.uniform(low = -avg_range, high = avg_range, size = (ont_size, word_dim)))
#self.avgs = theano.shared(value = init_avgs, name = 'avgs')
#init_cov_multiples = numpy.asarray([0.2]*ont_size)
#self.cov_multiples = theano.shared(value = init_cov_multiples, name = 'cov_mult')
self.word_dim = word_dim
def get_sym_rec_prob(self, word_ind, concept_ind):
#avg, cov_m = self.avgs[concept_ind], self.cov_multiples[concept_ind]
if self.rec_model_type == "multinomial":
# Softmax converts a vector to a matrix! Making it a vector with [0] before indexing
self.p_r = (T.nnet.softmax(self.hyp_strengths[concept_ind])[0])[word_ind]
else:
avg = self.ont_rep[concept_ind]
cov_m = 0.2
word_rep = self.vocab_rep[word_ind]
rep_m_avg = word_rep - avg
exp_term = -0.5 * T.dot(rep_m_avg, rep_m_avg) * (1. / T.abs_(cov_m))
sqrt_term = T.pow(2 * T.abs_(cov_m) * numpy.pi, self.word_dim)
self.p_r = 1. / T.sqrt(sqrt_term) * T.exp(exp_term)
return self.p_r
def get_params(self):
#return [self.avgs, self.cov_multiples]
if self.rec_model_type == "multinomial":
return [self.hyp_strengths]
else:
return []
def set_params(self, params):
if self.rec_model_type == "multinomial":
trained_hyp_strengths = params[0]
self.hyp_strengths.set_value(trained_hyp_strengths)
else:
return
#avgs, cov_multiples = params
#self.avgs.set_value(avgs)
#self.cov_multiples.set_value(cov_multiples)
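# A minimal numpy sketch (not part of the original module) of the density
# used by the "gaussian" branch of get_sym_rec_prob above: an isotropic
# Gaussian with mean avg and covariance cov_m * I,
#   p(x) = (2*pi*|cov_m|)**(-d/2) * exp(-||x - avg||**2 / (2*|cov_m|))
def _example_gaussian_density(word_rep, avg, cov_m=0.2):
    diff = word_rep - avg
    d = word_rep.shape[0]
    exp_term = numpy.exp(-0.5 * numpy.dot(diff, diff) / abs(cov_m))
    return exp_term / numpy.sqrt((2 * numpy.pi * abs(cov_m)) ** d)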
|
pdasigi/spade
|
reconstruction.py
|
Python
|
gpl-2.0
| 2,810
|
[
"Gaussian"
] |
a2bed4d4873ec16f775a84c79d03af9f647c09a5564d53154d5c271c16ac8b51
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split
def get_naive_bayes_models():
gnb = GaussianNB()
mnb = MultinomialNB()
bnb = BernoulliNB()
classifier_list = [gnb,mnb,bnb]
classifier_name_list = ['Gaussian NB','Multinomial NB','Bernoulli NB']
return classifier_list,classifier_name_list
def get_neural_network(hidden_layer_size=50):
mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
return [mlp], ['MultiLayer Perceptron']
def get_ensemble_models():
rf = RandomForestClassifier(n_estimators=51,min_samples_leaf=5,min_samples_split=3)
bagg = BaggingClassifier(n_estimators=71,random_state=42)
extra = ExtraTreesClassifier(n_estimators=57,random_state=42)
ada = AdaBoostClassifier(n_estimators=51,random_state=42)
grad = GradientBoostingClassifier(n_estimators=101,random_state=42)
classifier_list = [rf,bagg,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list,classifier_name_list
def label_encode_frame(dataframe):
columns = dataframe.columns
encoder = LabelEncoder()
for column in columns:
if type(dataframe[column][0]) is str:
dataframe[column] = encoder.fit_transform(dataframe[column].values)
return dataframe
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
print '--------- For Model : ', trained_model_name
predicted_values = trained_model.predict(X_test)
print metrics.classification_report(y_test,predicted_values)
print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
print "---------------------------------------\n"
filename = 'train.csv'
shelter_frame = pd.read_csv(filename)
target_variable = 'OutcomeType'
columns_to_drop = ['AnimalID','Name','DateTime','OutcomeType','OutcomeSubtype']
class_labels = list(shelter_frame[target_variable].values)
shelter_frame.drop(columns_to_drop,axis=1,inplace=True)
encoded_frame = label_encode_frame(shelter_frame)
X_train,X_test,y_train,y_test = train_test_split(encoded_frame,class_labels,test_size=0.2,random_state=42)
classifier_list,classifier_name_list = get_ensemble_models()
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
classifier.fit(X_train,y_train)
print_evaluation_metrics(classifier,classifier_name,X_test,y_test)
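# A minimal sketch (not part of the original script): the naive Bayes and
# MLP factories above are defined but never called; they can be evaluated
# the same way as the ensemble models.
def evaluate_extra_models():
    nb_classifiers, nb_names = get_naive_bayes_models()
    nn_classifiers, nn_names = get_neural_network()
    for clf, name in zip(nb_classifiers + nn_classifiers,
                         nb_names + nn_names):
        clf.fit(X_train, y_train)
        print_evaluation_metrics(clf, name, X_test, y_test)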
|
rupakc/Kaggle-Compendium
|
Shelter Animal Outcomes/shelter-baseline.py
|
Python
|
mit
| 2,963
|
[
"Gaussian"
] |
b18f6ee059300f7106a2dcbd7a93919139b57d3323783c6fe85211782ae7a705
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Andrew Dykstra <andrew.r.dykstra@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import numpy as np
from scipy import sparse
from ..externals.six import string_types
from ..utils import verbose, logger, warn
from ..io.pick import (channel_type, pick_info, pick_types,
_check_excludes_includes, _PICK_TYPES_KEYS)
from ..io.constants import FIFF
def _get_meg_system(info):
"""Educated guess for the helmet type based on channels"""
system = '306m'
for ch in info['chs']:
if ch['kind'] == FIFF.FIFFV_MEG_CH:
coil_type = ch['coil_type'] & 0xFFFF
if coil_type == FIFF.FIFFV_COIL_NM_122:
system = '122m'
break
elif coil_type // 1000 == 3: # All Vectorview coils are 30xx
system = '306m'
break
elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
nmag = np.sum([c['kind'] == FIFF.FIFFV_MEG_CH
for c in info['chs']])
system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
break
elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
system = 'CTF_275'
break
elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
system = 'KIT'
break
elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
system = 'BabySQUID'
break
return system
def _contains_ch_type(info, ch_type):
"""Check whether a certain channel type is in an info object
Parameters
    ----------
info : instance of Info
The measurement information.
ch_type : str
the channel type to be checked for
Returns
-------
has_ch_type : bool
Whether the channel type is present or not.
"""
if not isinstance(ch_type, string_types):
raise ValueError('`ch_type` is of class {actual_class}. It must be '
'`str`'.format(actual_class=type(ch_type)))
meg_extras = ['mag', 'grad', 'planar1', 'planar2']
valid_channel_types = sorted([key for key in _PICK_TYPES_KEYS
if key != 'meg'] + meg_extras)
if ch_type not in valid_channel_types:
raise ValueError('ch_type must be one of %s, not "%s"'
% (valid_channel_types, ch_type))
if info is None:
raise ValueError('Cannot check for channels of type "%s" because info '
'is None' % (ch_type,))
return ch_type in [channel_type(info, ii) for ii in range(info['nchan'])]
def _get_ch_type(inst, ch_type):
"""Helper to choose a single channel type (usually for plotting)
Usually used in plotting to plot a single datatype, e.g. look for mags,
then grads, then ... to plot.
"""
if ch_type is None:
for type_ in ['mag', 'grad', 'planar1', 'planar2', 'eeg']:
if type_ in inst:
ch_type = type_
break
else:
raise RuntimeError('No plottable channel types found')
return ch_type
@verbose
def equalize_channels(candidates, verbose=None):
"""Equalize channel picks for a collection of MNE-Python objects
Parameters
----------
candidates : list
        list of Raw | Epochs | Evoked | AverageTFR instances.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Notes
-----
This function operates inplace.
"""
from ..io.base import _BaseRaw
from ..epochs import _BaseEpochs
from ..evoked import Evoked
from ..time_frequency import AverageTFR
if not all(isinstance(c, (_BaseRaw, _BaseEpochs, Evoked, AverageTFR))
for c in candidates):
valid = ['Raw', 'Epochs', 'Evoked', 'AverageTFR']
raise ValueError('candidates must be ' + ' or '.join(valid))
chan_max_idx = np.argmax([c.info['nchan'] for c in candidates])
chan_template = candidates[chan_max_idx].ch_names
    logger.info('Identifying common channels ...')
channels = [set(c.ch_names) for c in candidates]
common_channels = set(chan_template).intersection(*channels)
dropped = list()
for c in candidates:
drop_them = list(set(c.ch_names) - common_channels)
if drop_them:
c.drop_channels(drop_them)
dropped.extend(drop_them)
if dropped:
dropped = list(set(dropped))
logger.info('Dropped the following channels:\n%s' % dropped)
else:
        logger.info('all channels match, nothing to do.')
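# Example (sketch, not part of the original module): given two Evoked
# instances with different channel sets, equalize_channels([ev1, ev2])
# drops the non-shared channels from both in place, so later arithmetic
# on the pair lines up channel-by-channel.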
class ContainsMixin(object):
"""Mixin class for Raw, Evoked, Epochs
"""
def __contains__(self, ch_type):
"""Check channel type membership
Parameters
----------
ch_type : str
Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc.
Returns
-------
in : bool
Whether or not the instance contains the given channel type.
Examples
--------
Channel type membership can be tested as::
>>> 'meg' in inst # doctest: +SKIP
True
>>> 'seeg' in inst # doctest: +SKIP
False
"""
if ch_type == 'meg':
has_ch_type = (_contains_ch_type(self.info, 'mag') or
_contains_ch_type(self.info, 'grad'))
else:
has_ch_type = _contains_ch_type(self.info, ch_type)
return has_ch_type
# XXX Eventually de-duplicate with _kind_dict of mne/io/meas_info.py
_human2fiff = {'ecg': FIFF.FIFFV_ECG_CH,
'eeg': FIFF.FIFFV_EEG_CH,
'emg': FIFF.FIFFV_EMG_CH,
'eog': FIFF.FIFFV_EOG_CH,
'exci': FIFF.FIFFV_EXCI_CH,
'ias': FIFF.FIFFV_IAS_CH,
'misc': FIFF.FIFFV_MISC_CH,
'resp': FIFF.FIFFV_RESP_CH,
'seeg': FIFF.FIFFV_SEEG_CH,
'stim': FIFF.FIFFV_STIM_CH,
'syst': FIFF.FIFFV_SYST_CH,
'bio': FIFF.FIFFV_BIO_CH,
'ecog': FIFF.FIFFV_ECOG_CH}
_human2unit = {'ecg': FIFF.FIFF_UNIT_V,
'eeg': FIFF.FIFF_UNIT_V,
'emg': FIFF.FIFF_UNIT_V,
'eog': FIFF.FIFF_UNIT_V,
'exci': FIFF.FIFF_UNIT_NONE,
'ias': FIFF.FIFF_UNIT_NONE,
'misc': FIFF.FIFF_UNIT_V,
'resp': FIFF.FIFF_UNIT_NONE,
'seeg': FIFF.FIFF_UNIT_V,
'stim': FIFF.FIFF_UNIT_NONE,
'syst': FIFF.FIFF_UNIT_NONE,
'bio': FIFF.FIFF_UNIT_V,
'ecog': FIFF.FIFF_UNIT_V}
_unit2human = {FIFF.FIFF_UNIT_V: 'V',
FIFF.FIFF_UNIT_T: 'T',
FIFF.FIFF_UNIT_T_M: 'T/m',
FIFF.FIFF_UNIT_NONE: 'NA'}
def _check_set(ch, projs, ch_type):
"""Helper to make sure type change is compatible with projectors"""
new_kind = _human2fiff[ch_type]
if ch['kind'] != new_kind:
for proj in projs:
if ch['ch_name'] in proj['data']['col_names']:
raise RuntimeError('Cannot change channel type for channel %s '
'in projector "%s"'
% (ch['ch_name'], proj['desc']))
ch['kind'] = new_kind
class SetChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs
"""
def _get_channel_positions(self, picks=None):
"""Gets channel locations from info
Parameters
----------
picks : array-like of int | None
Indices of channels to include. If None (default), all meg and eeg
channels that are available are returned (bad channels excluded).
Notes
-----
.. versionadded:: 0.9.0
"""
if picks is None:
picks = pick_types(self.info, meg=True, eeg=True)
chs = self.info['chs']
pos = np.array([chs[k]['loc'][:3] for k in picks])
n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
if n_zero > 1: # XXX some systems have origin (0, 0, 0)
raise ValueError('Could not extract channel positions for '
'{} channels'.format(n_zero))
return pos
def _set_channel_positions(self, pos, names):
"""Update channel locations in info
Parameters
----------
pos : array-like | np.ndarray, shape (n_points, 3)
The channel positions to be set.
names : list of str
The names of the channels to be set.
Notes
-----
.. versionadded:: 0.9.0
"""
if len(pos) != len(names):
raise ValueError('Number of channel positions not equal to '
'the number of names given.')
pos = np.asarray(pos, dtype=np.float)
if pos.shape[-1] != 3 or pos.ndim != 2:
msg = ('Channel positions must have the shape (n_points, 3) '
'not %s.' % (pos.shape,))
raise ValueError(msg)
for name, p in zip(names, pos):
if name in self.ch_names:
idx = self.ch_names.index(name)
self.info['chs'][idx]['loc'][:3] = p
else:
msg = ('%s was not found in the info. Cannot be updated.'
% name)
raise ValueError(msg)
def set_channel_types(self, mapping):
"""Define the sensor type of channels.
Note: The following sensor types are accepted:
            ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, bio, ecog
Parameters
----------
mapping : dict
a dictionary mapping a channel to a sensor type (str)
{'EEG061': 'eog'}.
Notes
-----
.. versionadded:: 0.9.0
"""
ch_names = self.info['ch_names']
# first check and assemble clean mappings of index and name
for ch_name, ch_type in mapping.items():
if ch_name not in ch_names:
raise ValueError("This channel name (%s) doesn't exist in "
"info." % ch_name)
c_ind = ch_names.index(ch_name)
if ch_type not in _human2fiff:
raise ValueError('This function cannot change to this '
'channel type: %s. Accepted channel types '
'are %s.'
% (ch_type,
", ".join(sorted(_human2unit.keys()))))
# Set sensor type
_check_set(self.info['chs'][c_ind], self.info['projs'], ch_type)
unit_old = self.info['chs'][c_ind]['unit']
unit_new = _human2unit[ch_type]
if unit_old not in _unit2human:
raise ValueError("Channel '%s' has unknown unit (%s). Please "
"fix the measurement info of your data."
% (ch_name, unit_old))
if unit_old != _human2unit[ch_type]:
warn("The unit for channel %s has changed from %s to %s."
% (ch_name, _unit2human[unit_old], _unit2human[unit_new]))
self.info['chs'][c_ind]['unit'] = _human2unit[ch_type]
if ch_type in ['eeg', 'seeg', 'ecog']:
self.info['chs'][c_ind]['coil_type'] = FIFF.FIFFV_COIL_EEG
else:
self.info['chs'][c_ind]['coil_type'] = FIFF.FIFFV_COIL_NONE
def rename_channels(self, mapping):
"""Rename channels.
Parameters
----------
mapping : dict | callable
a dictionary mapping the old channel to a new channel name
e.g. {'EEG061' : 'EEG161'}. Can also be a callable function
that takes and returns a string (new in version 0.10.0).
Notes
-----
.. versionadded:: 0.9.0
"""
rename_channels(self.info, mapping)
@verbose
def set_montage(self, montage, verbose=None):
"""Set EEG sensor configuration
Parameters
----------
montage : instance of Montage or DigMontage
The montage to use.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Notes
-----
Operates in place.
.. versionadded:: 0.9.0
"""
from .montage import _set_montage
_set_montage(self.info, montage)
def plot_sensors(self, kind='topomap', ch_type=None, title=None,
show_names=False, show=True):
"""
        Plot sensor positions.
Parameters
----------
kind : str
Whether to plot the sensors as 3d or as topomap. Available options
'topomap', '3d'. Defaults to 'topomap'.
ch_type : 'mag' | 'grad' | 'eeg' | 'seeg' | 'ecog' | None
The channel type to plot. If None, then channels are chosen in the
order given above.
title : str | None
Title for the figure. If None (default), equals to
``'Sensor positions (%s)' % ch_type``.
show_names : bool
Whether to display all channel names. Defaults to False.
show : bool
Show figure if True. Defaults to True.
Returns
-------
fig : instance of matplotlib figure
Figure containing the sensor topography.
See Also
--------
mne.viz.plot_layout
Notes
-----
This function plots the sensor locations from the info structure using
matplotlib. For drawing the sensors using mayavi see
:func:`mne.viz.plot_trans`.
.. versionadded:: 0.12.0
"""
from ..viz.utils import plot_sensors
return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title,
show_names=show_names, show=show)
class UpdateChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs, AverageTFR
"""
def pick_types(self, meg=True, eeg=False, stim=False, eog=False,
ecg=False, emg=False, ref_meg='auto', misc=False,
resp=False, chpi=False, exci=False, ias=False, syst=False,
seeg=False, bio=False, ecog=False, include=[],
exclude='bads', selection=None):
"""Pick some channels by type and names
Parameters
----------
meg : bool | str
            If True include all MEG channels. If False include no MEG channels.
If string it can be 'mag', 'grad', 'planar1' or 'planar2' to select
only magnetometers, all gradiometers, or a specific type of
gradiometer.
eeg : bool
If True include EEG channels.
stim : bool
If True include stimulus channels.
eog : bool
If True include EOG channels.
ecg : bool
If True include ECG channels.
emg : bool
If True include EMG channels.
        ref_meg : bool | str
If True include CTF / 4D reference channels. If 'auto', the
reference channels are only included if compensations are present.
misc : bool
If True include miscellaneous analog channels.
resp : bool
If True include response-trigger channel. For some MEG systems this
is separate from the stim channel.
chpi : bool
If True include continuous HPI coil channels.
exci : bool
Flux excitation channel used to be a stimulus channel.
ias : bool
Internal Active Shielding data (maybe on Triux only).
syst : bool
System status channel information (on Triux systems only).
seeg : bool
Stereotactic EEG channels.
bio : bool
Bio channels.
ecog : bool
Electrocorticography channels.
include : list of string
List of additional channels to include. If empty do not include
any.
exclude : list of string | str
List of channels to exclude. If 'bads' (default), exclude channels
in ``info['bads']``.
selection : list of string
Restrict sensor channels (MEG, EEG) to this list of channel names.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
Notes
-----
.. versionadded:: 0.9.0
"""
idx = pick_types(
self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
ias=ias, syst=syst, seeg=seeg, bio=bio, ecog=ecog, include=include,
exclude=exclude, selection=selection)
self._pick_drop_channels(idx)
return self
def pick_channels(self, ch_names):
"""Pick some channels
Parameters
----------
ch_names : list
The list of channels to select.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
Notes
-----
.. versionadded:: 0.9.0
"""
_check_excludes_includes(ch_names)
idx = [self.ch_names.index(c) for c in ch_names if c in self.ch_names]
self._pick_drop_channels(idx)
return self
def drop_channels(self, ch_names):
"""Drop some channels
Parameters
----------
ch_names : list
The list of channels to remove.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
pick_channels
Notes
-----
.. versionadded:: 0.9.0
"""
bad_idx = [self.ch_names.index(c) for c in ch_names
if c in self.ch_names]
idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx)
self._pick_drop_channels(idx)
return self
def _pick_drop_channels(self, idx):
# avoid circular imports
from ..io.base import _BaseRaw
from ..epochs import _BaseEpochs
from ..evoked import Evoked
from ..time_frequency import AverageTFR
if isinstance(self, (_BaseRaw, _BaseEpochs)):
if not self.preload:
raise RuntimeError('If Raw or Epochs, data must be preloaded '
'to drop or pick channels')
def inst_has(attr):
return getattr(self, attr, None) is not None
if inst_has('picks'):
self.picks = self.picks[idx]
if inst_has('_cals'):
self._cals = self._cals[idx]
pick_info(self.info, idx, copy=False)
if inst_has('_projector'):
self._projector = self._projector[idx][:, idx]
if isinstance(self, _BaseRaw) and inst_has('_data'):
self._data = self._data.take(idx, axis=0)
elif isinstance(self, _BaseEpochs) and inst_has('_data'):
self._data = self._data.take(idx, axis=1)
elif isinstance(self, AverageTFR) and inst_has('data'):
self.data = self.data.take(idx, axis=0)
elif isinstance(self, Evoked):
self.data = self.data.take(idx, axis=0)
def add_channels(self, add_list, force_update_info=False):
"""Append new channels to the instance.
Parameters
----------
add_list : list
A list of objects to append to self. Must contain all the same
type as the current object
force_update_info : bool
If True, force the info for objects to be appended to match the
values in `self`. This should generally only be used when adding
stim channels for which important metadata won't be overwritten.
.. versionadded:: 0.12
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
"""
# avoid circular imports
from ..io import _BaseRaw, _merge_info
from ..epochs import _BaseEpochs
if not isinstance(add_list, (list, tuple)):
raise AssertionError('Input must be a list or tuple of objs')
# Object-specific checks
if isinstance(self, (_BaseRaw, _BaseEpochs)):
if not all([inst.preload for inst in add_list] + [self.preload]):
raise AssertionError('All data must be preloaded')
data_name = '_data'
if isinstance(self, _BaseRaw):
con_axis = 0
comp_class = _BaseRaw
elif isinstance(self, _BaseEpochs):
con_axis = 1
comp_class = _BaseEpochs
else:
data_name = 'data'
con_axis = 0
comp_class = type(self)
if not all(isinstance(inst, comp_class) for inst in add_list):
raise AssertionError('All input data must be of same type')
data = [getattr(inst, data_name) for inst in [self] + add_list]
# Make sure that all dimensions other than channel axis are the same
compare_axes = [i for i in range(data[0].ndim) if i != con_axis]
shapes = np.array([dat.shape for dat in data])[:, compare_axes]
if not ((shapes[0] - shapes) == 0).all():
raise AssertionError('All dimensions except channels must match')
# Create final data / info objects
data = np.concatenate(data, axis=con_axis)
infos = [self.info] + [inst.info for inst in add_list]
new_info = _merge_info(infos, force_update_to_first=force_update_info)
# Now update the attributes
setattr(self, data_name, data)
self.info = new_info
if isinstance(self, _BaseRaw):
self._cals = np.concatenate([getattr(inst, '_cals')
for inst in [self] + add_list])
return self
class InterpolationMixin(object):
"""Mixin class for Raw, Evoked, Epochs
"""
def interpolate_bads(self, reset_bads=True, mode='accurate'):
"""Interpolate bad MEG and EEG channels.
Operates in place.
Parameters
----------
reset_bads : bool
If True, remove the bads from info.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used for interpolation of MEG
channels.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
Notes
-----
.. versionadded:: 0.9.0
"""
from .interpolation import _interpolate_bads_eeg, _interpolate_bads_meg
if getattr(self, 'preload', None) is False:
raise ValueError('Data must be preloaded.')
_interpolate_bads_eeg(self)
_interpolate_bads_meg(self, mode=mode)
if reset_bads is True:
self.info['bads'] = []
return self
def rename_channels(info, mapping):
"""Rename channels.
Parameters
----------
info : dict
Measurement info.
mapping : dict | callable
a dictionary mapping the old channel to a new channel name
e.g. {'EEG061' : 'EEG161'}. Can also be a callable function
that takes and returns a string (new in version 0.10.0).
"""
info._check_consistency()
bads = list(info['bads']) # make our own local copies
ch_names = list(info['ch_names'])
# first check and assemble clean mappings of index and name
if isinstance(mapping, dict):
orig_names = sorted(list(mapping.keys()))
missing = [orig_name not in ch_names for orig_name in orig_names]
if any(missing):
raise ValueError("Channel name(s) in mapping missing from info: "
"%s" % np.array(orig_names)[np.array(missing)])
new_names = [(ch_names.index(ch_name), new_name)
for ch_name, new_name in mapping.items()]
elif callable(mapping):
new_names = [(ci, mapping(ch_name))
for ci, ch_name in enumerate(ch_names)]
else:
raise ValueError('mapping must be callable or dict, not %s'
% (type(mapping),))
# check we got all strings out of the mapping
if any(not isinstance(new_name[1], string_types)
for new_name in new_names):
raise ValueError('New channel mapping must only be to strings')
# do the remapping locally
for c_ind, new_name in new_names:
for bi, bad in enumerate(bads):
if bad == ch_names[c_ind]:
bads[bi] = new_name
ch_names[c_ind] = new_name
# check that all the channel names are unique
if len(ch_names) != len(np.unique(ch_names)):
raise ValueError('New channel names are not unique, renaming failed')
    # do the remapping in info
info['bads'] = bads
for ch, ch_name in zip(info['chs'], ch_names):
ch['ch_name'] = ch_name
info._update_redundant()
info._check_consistency()
def _recursive_flatten(cell, dtype):
"""Helper to unpack mat files in Python"""
while not isinstance(cell[0], dtype):
cell = [c for d in cell for c in d]
return cell
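# Example (sketch): _recursive_flatten([[['a'], ['b']], [['c']]], str)
# peels one nesting level per iteration until the leading element is a
# str, returning ['a', 'b', 'c'].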
def read_ch_connectivity(fname, picks=None):
"""Parse FieldTrip neighbors .mat file
More information on these neighbor definitions can be found on the
related FieldTrip documentation pages:
http://fieldtrip.fcdonders.nl/template/neighbours
Parameters
----------
fname : str
The file name. Example: 'neuromag306mag', 'neuromag306planar',
'ctf275', 'biosemi64', etc.
picks : array-like of int, shape (n_channels,)
The indices of the channels to include. Must match the template.
Defaults to None.
Returns
-------
ch_connectivity : scipy.sparse matrix
The connectivity matrix.
ch_names : list
The list of channel names present in connectivity matrix.
"""
from scipy.io import loadmat
if not op.isabs(fname):
templates_dir = op.realpath(op.join(op.dirname(__file__),
'data', 'neighbors'))
templates = os.listdir(templates_dir)
for f in templates:
if f == fname:
break
if f == fname + '_neighb.mat':
fname += '_neighb.mat'
break
else:
raise ValueError('I do not know about this neighbor '
'template: "{}"'.format(fname))
fname = op.join(templates_dir, fname)
nb = loadmat(fname)['neighbours']
ch_names = _recursive_flatten(nb['label'], string_types)
neighbors = [_recursive_flatten(c, string_types) for c in
nb['neighblabel'].flatten()]
assert len(ch_names) == len(neighbors)
if picks is not None:
if max(picks) >= len(ch_names):
raise ValueError('The picks must be compatible with '
'channels. Found a pick ({}) which exceeds '
'the channel range ({})'
.format(max(picks), len(ch_names)))
connectivity = _ch_neighbor_connectivity(ch_names, neighbors)
if picks is not None:
# picking before constructing matrix is buggy
connectivity = connectivity[picks][:, picks]
ch_names = [ch_names[p] for p in picks]
return connectivity, ch_names
def _ch_neighbor_connectivity(ch_names, neighbors):
"""Compute sensor connectivity matrix
Parameters
----------
ch_names : list of str
The channel names.
neighbors : list of list
A list of list of channel names. The neighbors to
which the channels in ch_names are connected with.
Must be of the same length as ch_names.
Returns
-------
ch_connectivity : scipy.sparse matrix
The connectivity matrix.
"""
if len(ch_names) != len(neighbors):
raise ValueError('`ch_names` and `neighbors` must '
'have the same length')
set_neighbors = set([c for d in neighbors for c in d])
rest = set(ch_names) - set_neighbors
if len(rest) > 0:
raise ValueError('Some of your neighbors are not present in the '
'list of channel names')
for neigh in neighbors:
if (not isinstance(neigh, list) and
not all(isinstance(c, string_types) for c in neigh)):
raise ValueError('`neighbors` must be a list of lists of str')
ch_connectivity = np.eye(len(ch_names), dtype=bool)
    for ii, neighbs in enumerate(neighbors):
        ch_connectivity[ii, [ch_names.index(i) for i in neighbs]] = True
ch_connectivity = sparse.csr_matrix(ch_connectivity)
return ch_connectivity
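# Example (sketch): with ch_names = ['A', 'B', 'C'] and
# neighbors = [['B'], ['A', 'C'], ['B']], the returned sparse matrix is
# the 3x3 identity plus the symmetric A-B and B-C links.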
def fix_mag_coil_types(info):
"""Fix magnetometer coil types
Parameters
----------
info : dict
The info dict to correct. Corrections are done in-place.
Notes
-----
This function changes magnetometer coil types 3022 (T1: SQ20483N) and
3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
records in the info structure.
Neuromag Vectorview systems can contain magnetometers with two
different coil sizes (3022 and 3023 vs. 3024). The systems
incorporating coils of type 3024 were introduced last and are used at
the majority of MEG sites. At some sites with 3024 magnetometers,
the data files have still defined the magnetometers to be of type
3022 to ensure compatibility with older versions of Neuromag software.
In the MNE software as well as in the present version of Neuromag
software coil type 3024 is fully supported. Therefore, it is now safe
to upgrade the data files to use the true coil type.
.. note:: The effect of the difference between the coil sizes on the
current estimates computed by the MNE software is very small.
Therefore the use of mne_fix_mag_coil_types is not mandatory.
"""
old_mag_inds = _get_T1T2_mag_inds(info)
for ii in old_mag_inds:
info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3
logger.info('%d of %d T1/T2 magnetometer types replaced with T3.' %
(len(old_mag_inds), len(pick_types(info, meg='mag'))))
info._check_consistency()
def _get_T1T2_mag_inds(info):
"""Helper to find T1/T2 magnetometer coil types"""
picks = pick_types(info, meg='mag')
old_mag_inds = []
for ii in picks:
ch = info['chs'][ii]
if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1,
FIFF.FIFFV_COIL_VV_MAG_T2):
old_mag_inds.append(ii)
return old_mag_inds
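# A minimal sketch (not part of the original module; assumes `info` contains
# a channel named 'EEG061'):
def _example_module_helpers(info):
    rename_channels(info, {'EEG061': 'EOG061'})          # dict mapping
    rename_channels(info, lambda name: name.strip('.'))  # callable mapping
    fix_mag_coil_types(info)  # upgrade T1/T2 magnetometer coil types to T3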
|
ARudiuk/mne-python
|
mne/channels/channels.py
|
Python
|
bsd-3-clause
| 31,361
|
[
"Mayavi"
] |
b80243a4259ce431064bbee24a0afddd3be5f95ccf8ef2e50e847893b9703902
|
import numpy as np
from ase.tasks.bulk import BulkTask
from gpaw import FermiDirac, MethfesselPaxton, MixerSum, \
KohnShamConvergenceError, PoissonSolver
from gpaw.factory import GPAWFactory
a0 = 2.84
def f(name, dist, k, g):
tag = '%s-%02d-%2d' % (name, k, g)
task = BulkTask(tag=tag, lattice_constant=a0, cubic=True,
magmoms=[2.3],
fit=(5, 0.02))
factory = GPAWFactory(xc='PBE',
kpts=(k, k, k),
occupations=dist,
basis='dzp',
mixer=MixerSum(0.05, 5, 1),
eigensolver='cg',
maxiter=500,
poissonsolver=PoissonSolver(eps=1e-12),
gpts=(g, g, g))
task.set_calculator_factory(factory)
task.run('Fe')
for width in [0.05, 0.1, 0.15, 0.2]:
for k in [4, 6, 8, 10, 12]:
f('FD-%.2f' % width, FermiDirac(width), k, 12)
f('MP-%.2f' % width, MethfesselPaxton(width), k, 12)
#f('MP1-%.2f' % width, MethfesselPaxton(width, 1), k, 12)
for g in range(16, 32, 4):
f('FD-%.2f' % 0.1, FermiDirac(0.1), 8, g)
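# The two sweeps above form a convergence study: the first varies the
# smearing scheme/width and the k-point mesh at a fixed 12x12x12 grid, the
# second varies the real-space grid at fixed Fermi-Dirac smearing (0.1 eV)
# and an 8x8x8 k-point mesh.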
|
ajylee/gpaw-rtxs
|
doc/tutorials/lattice_constants/iron.py
|
Python
|
gpl-3.0
| 1,199
|
[
"ASE",
"GPAW"
] |
0a9dd9d1ef603f9199be47e69da9d1da65520b034c264b35de3fac260a4674d4
|
"""Contains SSIM library functions and classes."""
from __future__ import absolute_import
import argparse
import glob
import sys
import numpy as np
from scipy import signal
from ssim import compat
from ssim.compat import Image, ImageOps
from ssim.utils import convolve_gaussian_2d
from ssim.utils import get_gaussian_kernel
from ssim.utils import to_grayscale
class SSIMImage(object):
"""Wraps a PIL Image object with SSIM state.
Attributes:
img: Original PIL Image.
img_gray: grayscale Image.
img_gray_squared: squared img_gray.
img_gray_mu: img_gray convolved with gaussian kernel.
img_gray_mu_squared: squared img_gray_mu.
        img_gray_sigma_squared: img_gray_squared convolved with gaussian
            kernel, minus img_gray_mu_squared.
"""
def __init__(self, img, gaussian_kernel_1d=None, size=None):
"""Create an SSIMImage.
Args:
img (str or PIL.Image): PIL Image object or file name.
gaussian_kernel_1d (np.ndarray, optional): Gaussian kernel
that was generated with utils.get_gaussian_kernel is used
to precompute common objects for SSIM computation
size (tuple, optional): New image size to resize image to.
"""
# Use existing or create a new PIL.Image
self.img = img if not isinstance(img, compat.basestring) \
else compat.Image.open(img)
# Resize image if size is defined and different
# from original image
if size and size != self.img.size:
self.img = self.img.resize(size, Image.ANTIALIAS)
# Set the size of the image
self.size = self.img.size
# If gaussian kernel is defined we create
# common SSIM objects
if gaussian_kernel_1d is not None:
self.gaussian_kernel_1d = gaussian_kernel_1d
# np.array of grayscale and alpha image
self.img_gray, self.img_alpha = to_grayscale(self.img)
if self.img_alpha is not None:
self.img_gray[self.img_alpha == 255] = 0
# Squared grayscale
self.img_gray_squared = self.img_gray ** 2
# Convolve grayscale image with gaussian
self.img_gray_mu = convolve_gaussian_2d(
self.img_gray, self.gaussian_kernel_1d)
# Squared mu
self.img_gray_mu_squared = self.img_gray_mu ** 2
# Convolve squared grayscale with gaussian
self.img_gray_sigma_squared = convolve_gaussian_2d(
self.img_gray_squared, self.gaussian_kernel_1d)
            # Subtract squared mu
self.img_gray_sigma_squared -= self.img_gray_mu_squared
# If we don't define gaussian kernel, we create
# common CW-SSIM objects
else:
# Grayscale PIL.Image
self.img_gray = ImageOps.grayscale(self.img)
class SSIM(object):
"""Computes SSIM between two images."""
def __init__(self, img, gaussian_kernel_1d=None, size=None,
l=255, k_1=0.01, k_2=0.03, k=0.01):
"""Create an SSIM object.
Args:
img (str or PIL.Image): Reference image to compare other images to.
l, k_1, k_2 (float): SSIM configuration variables.
k (float): CW-SSIM configuration variable (default 0.01)
gaussian_kernel_1d (np.ndarray, optional): Gaussian kernel
that was generated with utils.get_gaussian_kernel is used
to precompute common objects for SSIM computation
size (tuple, optional): resize the image to the tuple size
"""
self.k = k
# Set k1,k2 & c1,c2 to depend on L (width of color map).
self.c_1 = (k_1 * l) ** 2
self.c_2 = (k_2 * l) ** 2
self.gaussian_kernel_1d = gaussian_kernel_1d
self.img = SSIMImage(img, gaussian_kernel_1d, size)
def ssim_value(self, target):
"""Compute the SSIM value from the reference image to the target image.
Args:
target (str or PIL.Image): Input image to compare the reference image
to. This may be a PIL Image object or, to save time, an SSIMImage
object (e.g. the img member of another SSIM object).
Returns:
Computed SSIM float value.
"""
# Performance boost if handed a compatible SSIMImage object.
if not isinstance(target, SSIMImage) \
or not np.array_equal(self.gaussian_kernel_1d,
target.gaussian_kernel_1d):
target = SSIMImage(target, self.gaussian_kernel_1d, self.img.size)
img_mat_12 = self.img.img_gray * target.img_gray
img_mat_sigma_12 = convolve_gaussian_2d(
img_mat_12, self.gaussian_kernel_1d)
img_mat_mu_12 = self.img.img_gray_mu * target.img_gray_mu
img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12
# Numerator of SSIM
num_ssim = ((2 * img_mat_mu_12 + self.c_1) *
(2 * img_mat_sigma_12 + self.c_2))
# Denominator of SSIM
den_ssim = (
(self.img.img_gray_mu_squared + target.img_gray_mu_squared +
self.c_1) *
(self.img.img_gray_sigma_squared +
target.img_gray_sigma_squared + self.c_2))
ssim_map = num_ssim / den_ssim
index = np.average(ssim_map)
return index
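    # The map averaged above is the standard per-pixel SSIM index:
    #   SSIM(x, y) = ((2*mu_x*mu_y + c1) * (2*sigma_xy + c2)) /
    #                ((mu_x**2 + mu_y**2 + c1) * (sigma_x**2 + sigma_y**2 + c2))
    # where c1 = (k_1*l)**2 and c2 = (k_2*l)**2 stabilize the division.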
def cw_ssim_value(self, target, width=30):
"""Compute the complex wavelet SSIM (CW-SSIM) value from the reference
image to the target image.
Args:
target (str or PIL.Image): Input image to compare the reference image
to. This may be a PIL Image object or, to save time, an SSIMImage
object (e.g. the img member of another SSIM object).
width: width for the wavelet convolution (default: 30)
Returns:
Computed CW-SSIM float value.
"""
if not isinstance(target, SSIMImage):
target = SSIMImage(target, size=self.img.size)
# Define a width for the wavelet convolution
widths = np.arange(1, width+1)
# Use the image data as arrays
sig1 = np.asarray(self.img.img_gray.getdata())
sig2 = np.asarray(target.img_gray.getdata())
# Convolution
cwtmatr1 = signal.cwt(sig1, signal.ricker, widths)
cwtmatr2 = signal.cwt(sig2, signal.ricker, widths)
# Compute the first term
c1c2 = np.multiply(abs(cwtmatr1), abs(cwtmatr2))
c1_2 = np.square(abs(cwtmatr1))
c2_2 = np.square(abs(cwtmatr2))
num_ssim_1 = 2 * np.sum(c1c2, axis=0) + self.k
den_ssim_1 = np.sum(c1_2, axis=0) + np.sum(c2_2, axis=0) + self.k
# Compute the second term
c1c2_conj = np.multiply(cwtmatr1, np.conjugate(cwtmatr2))
num_ssim_2 = 2 * np.abs(np.sum(c1c2_conj, axis=0)) + self.k
den_ssim_2 = 2 * np.sum(np.abs(c1c2_conj), axis=0) + self.k
# Construct the result
ssim_map = (num_ssim_1 / den_ssim_1) * (num_ssim_2 / den_ssim_2)
# Average the per pixel results
index = np.average(ssim_map)
return index
def main():
"""Main function for pyssim."""
description = '\n'.join([
'Compares an image with a list of images using the SSIM metric.',
' Example:',
' pyssim test-images/test1-1.png "test-images/*"'
])
parser = argparse.ArgumentParser(
prog='pyssim', formatter_class=argparse.RawTextHelpFormatter,
description=description)
parser.add_argument('--cw', help='compute the complex wavelet SSIM',
action='store_true')
parser.add_argument(
'base_image', metavar='image1.png', type=argparse.FileType('r'))
parser.add_argument(
'comparison_images', metavar='image path with* or image2.png')
parser.add_argument('--width', type=int, default=None,
help='scales the image before computing SSIM')
parser.add_argument('--height', type=int, default=None,
help='scales the image before computing SSIM')
args = parser.parse_args()
if args.width and args.height:
size = (args.width, args.height)
else:
size = None
if not args.cw:
gaussian_kernel_sigma = 1.5
gaussian_kernel_width = 11
gaussian_kernel_1d = get_gaussian_kernel(
gaussian_kernel_width, gaussian_kernel_sigma)
comparison_images = glob.glob(args.comparison_images)
is_a_single_image = len(comparison_images) == 1
for comparison_image in comparison_images:
if args.cw:
ssim = SSIM(args.base_image.name, size=size)
ssim_value = ssim.cw_ssim_value(comparison_image)
else:
ssim = SSIM(args.base_image.name, gaussian_kernel_1d, size=size)
ssim_value = ssim.ssim_value(comparison_image)
if is_a_single_image:
sys.stdout.write('%.7g' % ssim_value)
else:
sys.stdout.write('%s - %s: %.7g' % (
args.base_image.name, comparison_image, ssim_value))
sys.stdout.write('\n')
if __name__ == '__main__':
main()
|
jterrace/pyssim
|
ssim/ssimlib.py
|
Python
|
mit
| 9,166
|
[
"Gaussian"
] |
b73b71dd7d57aca62550232d17992d170597a604867d18f28cf4ee46b9c41d16
|
#!/usr/bin/env python3
import os
import pkg_resources
import tbapy
__version__ = pkg_resources.require("FRCUploader")[0].version
# Default Variables
DEBUG = False # DON'T COMMIT THIS LINE IF TRUE
DEFAULT_TAGS = "{}, frcuploader, FIRST, omgrobots, FRC, FIRST Robotics Competition, robots, Robotics, {game}"
MATCH_TYPE = ("qm", "qf", "sf", "f1m")
DEFAULT_DESCRIPTION = """Footage of the {ename} is courtesy of {team}.
Red Alliance ({red1}, {red2}, {red3}) - {redscore}
Blue Alliance ({blue3}, {blue2}, {blue1}) - {bluescore}
To view match schedules and results for this event, visit The Blue Alliance Event Page: https://www.thebluealliance.com/event/{ecode}
Follow us on Twitter (@{twit}) and Facebook ({fb}).
For more information and future event schedules, visit our website: {weblink}
Thanks for watching!"""
NO_TBA_DESCRIPTION = """Footage of the {ename} Event is courtesy of {team}.
Follow us on Twitter (@{twit}) and Facebook ({fb}).
For more information and future event schedules, visit our website: {weblink}
Thanks for watching!"""
CREDITS = """
Uploaded with FRC-YouTube-Uploader (https://github.com/NikhilNarayana/FRC-YouTube-Uploader) by Nikhil Narayana"""
VALID_PRIVACY_STATUSES = ("public", "unlisted", "private")
GAMES = {
"2022": "Rapid React, RAPID REACT",
"2021": "FIRST Rise: Infinite Recharge, Rise: INFINITE RECHARGE, INFINITE RECHARGE",
"2020": "FIRST Rise: Infinite Recharge, Rise: INFINITE RECHARGE, INFINITE RECHARGE",
"2019": "FIRST Destination: Deep Space, Destination: Deep Space, Deep Space",
"2018": "FIRST Power Up, FIRST POWER UP",
"2017": "FIRST Steamworks, FIRST STEAMworks",
"2016": "FIRST Stronghold",
"2015": "Recycle Rush",
"2014": "Aerial Assist",
"2013": "Ultimate Ascent",
}
# Extra Stuff
abbrv = "frc"
short_name = "frcuploader"
long_name = "FRC YouTube Uploader"
row_range = "Data!A1:G1"
first_run = True
stop_thread = False
response = None
status = None
error = None
sleep_minutes = 600
retry = 0
youtube = None
tba = tbapy.TBA("wvIxtt5Qvbr2qJtqW7ZsZ4vNppolYy0zMNQduH8LdYA7v2o1myt8ZbEOHAwzRuqf")
trusted = False
sizes = ("bytes", "KB", "MB", "GB", "TB")
cerem = (
"None",
"Opening Ceremonies",
"Alliance Selection",
"Closing Ceremonies",
"Highlight Reel",
)
frc_folder = os.path.join(os.path.expanduser("~"), ".frcuploader")
yt_accounts_folder = os.path.join(frc_folder, "accounts")
youtube_oauth_file = os.path.join(frc_folder, "frc-oauth2-youtube.json")
os.makedirs(yt_accounts_folder, exist_ok=True)
queue_values = os.path.join(frc_folder, "frc_queue_values.txt")
form_values = os.path.join(frc_folder, "frc_form_values.json")
log_file = os.path.join(frc_folder, "frc_log.txt")
rec_formats = (".ts", ".mkv", ".avi", ".mp4", ".flv", ".mov")
|
NikhilNarayana/FRC-YouTube-Uploader
|
frcuploader/consts.py
|
Python
|
gpl-3.0
| 2,768
|
[
"VisIt"
] |
c7be91059d1ad28c9354a3c945ef780de4ef0f04212b7c3dc2f35c84f26ff2d2
|
import numpy as np
# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package
# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
if have_vtk:
vtkInteractorStyleUser = vtk.vtkInteractorStyleUser
# version = vtk.vtkVersion.GetVTKSourceVersion().split(' ')[-1]
# major_version = vtk.vtkVersion.GetVTKMajorVersion()
else:
vtkInteractorStyleUser = object
class Event(object):
def __init__(self):
self.position = None
self.name = None
self.key = None
self._abort_flag = None
@property
def abort_flag(self):
return self._abort_flag
def update(self, event_name, interactor):
""" Updates current event information. """
self.name = event_name
self.position = np.asarray(interactor.GetEventPosition())
self.key = interactor.GetKeySym()
self._abort_flag = False # Reset abort flag
def abort(self):
""" Aborts the event i.e. do not propagate it any further. """
self._abort_flag = True
def reset(self):
""" Done with the current event. Reset the attributes. """
self.position = None
self.name = None
self.key = None
self._abort_flag = False
class CustomInteractorStyle(vtkInteractorStyleUser):
""" Manipulate the camera and interact with objects in the scene.
This interactor style allows the user to interactively manipulate (pan,
rotate and zoom) the camera. It also allows the user to interact (click,
scroll, etc.) with objects in the scene.
Several events handling methods from :class:`vtkInteractorStyleUser` have
been overloaded to allow the propagation of the events to the objects the
user is interacting with.
In summary, while interacting with the scene, the mouse events are as
follows:
- Left mouse button: rotates the camera
    - Right mouse button: dollies the camera
    - Mouse wheel: dollies the camera
- Middle mouse button: pans the camera
"""
def __init__(self):
# Default interactor is responsible for moving the camera.
self.default_interactor = vtk.vtkInteractorStyleTrackballCamera()
# The picker allows us to know which object/actor is under the mouse.
self.picker = vtk.vtkPropPicker()
self.chosen_element = None
self.event = Event()
# Define some interaction states
self.left_button_down = False
self.right_button_down = False
self.middle_button_down = False
self.active_props = set()
self.selected_props = {"left_button": set(),
"right_button": set(),
"middle_button": set()}
def add_active_prop(self, prop):
self.active_props.add(prop)
def remove_active_prop(self, prop):
self.active_props.remove(prop)
def get_prop_at_event_position(self):
""" Returns the prop that lays at the event position. """
# TODO: return a list of items (i.e. each level of the assembly path).
event_pos = self.GetInteractor().GetEventPosition()
self.picker.Pick(event_pos[0], event_pos[1], 0,
self.GetCurrentRenderer())
path = self.picker.GetPath()
if path is None:
return None
node = path.GetLastNode()
prop = node.GetViewProp()
return prop
def propagate_event(self, evt, *props):
for prop in props:
# Propagate event to the prop.
prop.InvokeEvent(evt)
if self.event.abort_flag:
return
def on_left_button_down(self, obj, evt):
self.left_button_down = True
prop = self.get_prop_at_event_position()
if prop is not None:
self.selected_props["left_button"].add(prop)
self.propagate_event(evt, prop)
if not self.event.abort_flag:
self.default_interactor.OnLeftButtonDown()
def on_left_button_up(self, obj, evt):
self.left_button_down = False
self.propagate_event(evt, *self.selected_props["left_button"])
self.selected_props["left_button"].clear()
self.default_interactor.OnLeftButtonUp()
def on_right_button_down(self, obj, evt):
self.right_button_down = True
prop = self.get_prop_at_event_position()
if prop is not None:
self.selected_props["right_button"].add(prop)
self.propagate_event(evt, prop)
if not self.event.abort_flag:
self.default_interactor.OnRightButtonDown()
def on_right_button_up(self, obj, evt):
self.right_button_down = False
self.propagate_event(evt, *self.selected_props["right_button"])
self.selected_props["right_button"].clear()
self.default_interactor.OnRightButtonUp()
def on_middle_button_down(self, obj, evt):
self.middle_button_down = True
prop = self.get_prop_at_event_position()
if prop is not None:
self.selected_props["middle_button"].add(prop)
self.propagate_event(evt, prop)
if not self.event.abort_flag:
self.default_interactor.OnMiddleButtonDown()
def on_middle_button_up(self, obj, evt):
self.middle_button_down = False
self.propagate_event(evt, *self.selected_props["middle_button"])
self.selected_props["middle_button"].clear()
self.default_interactor.OnMiddleButtonUp()
def on_mouse_move(self, obj, evt):
# Only propagate events to active or selected props.
self.propagate_event(evt, *(self.active_props |
self.selected_props["left_button"] |
self.selected_props["right_button"] |
self.selected_props["middle_button"]))
self.default_interactor.OnMouseMove()
def on_mouse_wheel_forward(self, obj, evt):
# First, propagate mouse wheel event to underneath prop.
prop = self.get_prop_at_event_position()
if prop is not None:
self.propagate_event(evt, prop)
# Then, to the active props.
if not self.event.abort_flag:
self.propagate_event(evt, *self.active_props)
# Finally, to the default interactor.
if not self.event.abort_flag:
self.default_interactor.OnMouseWheelForward()
self.event.reset()
def on_mouse_wheel_backward(self, obj, evt):
# First, propagate mouse wheel event to underneath prop.
prop = self.get_prop_at_event_position()
if prop is not None:
self.propagate_event(evt, prop)
# Then, to the active props.
if not self.event.abort_flag:
self.propagate_event(evt, *self.active_props)
# Finally, to the default interactor.
if not self.event.abort_flag:
self.default_interactor.OnMouseWheelBackward()
self.event.reset()
def on_char(self, obj, evt):
self.propagate_event(evt, *self.active_props)
def on_key_press(self, obj, evt):
self.propagate_event(evt, *self.active_props)
def on_key_release(self, obj, evt):
self.propagate_event(evt, *self.active_props)
def SetInteractor(self, interactor):
# Internally, `InteractorStyle` objects need a handle to a
# `vtkWindowInteractor` object and this is done via `SetInteractor`.
# However, this has the side effect of adding directly all their
# observers to the `interactor`!
self.default_interactor.SetInteractor(interactor)
# Remove all observers *most likely* (cannot guarantee that the
# interactor didn't already have these observers) added by
# `vtkInteractorStyleTrackballCamera`, i.e. our `default_interactor`.
#
# Note: Be sure that no observer has been manually added to the
# `interactor` before setting the InteractorStyle.
interactor.RemoveObservers("TimerEvent")
interactor.RemoveObservers("EnterEvent")
interactor.RemoveObservers("LeaveEvent")
interactor.RemoveObservers("ExposeEvent")
interactor.RemoveObservers("ConfigureEvent")
interactor.RemoveObservers("CharEvent")
interactor.RemoveObservers("KeyPressEvent")
interactor.RemoveObservers("KeyReleaseEvent")
interactor.RemoveObservers("MouseMoveEvent")
interactor.RemoveObservers("LeftButtonPressEvent")
interactor.RemoveObservers("RightButtonPressEvent")
interactor.RemoveObservers("MiddleButtonPressEvent")
interactor.RemoveObservers("LeftButtonReleaseEvent")
interactor.RemoveObservers("RightButtonReleaseEvent")
interactor.RemoveObservers("MiddleButtonReleaseEvent")
interactor.RemoveObservers("MouseWheelForwardEvent")
interactor.RemoveObservers("MouseWheelBackwardEvent")
# This class is a `vtkClass` (instead of `object`), so `super()`
# cannot be used. Also the method `SetInteractor` is not overridden in
# `vtkInteractorStyleUser` so we have to call directly the one from
# `vtkInteractorStyle`. In addition to setting the interactor, the
# following line adds the necessary hooks to listen to this instance's
# observers.
vtk.vtkInteractorStyle.SetInteractor(self, interactor)
# Keyboard events.
self.AddObserver("CharEvent", self.on_char)
self.AddObserver("KeyPressEvent", self.on_key_press)
self.AddObserver("KeyReleaseEvent", self.on_key_release)
# Mouse events.
self.AddObserver("MouseMoveEvent", self.on_mouse_move)
self.AddObserver("LeftButtonPressEvent", self.on_left_button_down)
self.AddObserver("LeftButtonReleaseEvent", self.on_left_button_up)
self.AddObserver("RightButtonPressEvent", self.on_right_button_down)
self.AddObserver("RightButtonReleaseEvent", self.on_right_button_up)
self.AddObserver("MiddleButtonPressEvent", self.on_middle_button_down)
self.AddObserver("MiddleButtonReleaseEvent", self.on_middle_button_up)
# Windows and special events.
        # TODO: if we ever find them useful we could support them.
# self.AddObserver("TimerEvent", self.on_timer)
# self.AddObserver("EnterEvent", self.on_enter)
# self.AddObserver("LeaveEvent", self.on_leave)
# self.AddObserver("ExposeEvent", self.on_expose)
# self.AddObserver("ConfigureEvent", self.on_configure)
# These observers need to be added directly to the interactor because
        # `vtkInteractorStyleUser` does not support wheel events prior to 7.1. See
# https://github.com/Kitware/VTK/commit/373258ed21f0915c425eddb996ce6ac13404be28
interactor.AddObserver("MouseWheelForwardEvent",
self.on_mouse_wheel_forward)
interactor.AddObserver("MouseWheelBackwardEvent",
self.on_mouse_wheel_backward)
def force_render(self):
""" Causes the renderer to refresh. """
self.GetInteractor().GetRenderWindow().Render()
    def add_callback(self, prop, event_type, callback, priority=0, args=None):
        """ Adds a callback associated to a specific event for a VTK prop.
        Parameters
        ----------
        prop : vtkProp
        event_type : event code
        callback : function
        priority : int
        """
        if args is None:
            args = []  # avoid the shared-mutable-default-argument pitfall
        def _callback(obj, event_name):
            # Update event information.
            self.event.update(event_name, self.GetInteractor())
            callback(self, prop, *args)
        prop.AddObserver(event_type, _callback, priority)
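# A minimal usage sketch (not part of the original module; `interactor` and
# `actor` are assumed to come from an existing vtk render-window setup):
def _example_attach_style(interactor, actor):
    style = CustomInteractorStyle()
    style.SetInteractor(interactor)
    interactor.SetInteractorStyle(style)

    def on_left_click(istyle, prop):
        print("clicked on %s" % prop)
        istyle.event.abort()  # swallow the event instead of rotating the camera

    style.add_callback(actor, "LeftButtonPressEvent", on_left_click)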
|
nilgoyyou/dipy
|
dipy/viz/interactor.py
|
Python
|
bsd-3-clause
| 11,768
|
[
"VTK"
] |
005f5fe7cc63c6d48626fbb24cbd9ae7d6d258dcc9389d750e913fdaa61b3e5c
|
import pygame
import random
import time
import math
import psycopg2
class Database:
def __init__(self):
# Connect to an existing database
self.connection = psycopg2.connect("dbname=postgres user=postgres password= host=localhost")
# Open a cursor to perform database operations
self.cursor = self.connection.cursor()
    def select_query(self, query, fetch_all, query_data=None):
        # example: data.select_query("""SELECT * FROM players;""", True)
        self.cursor.execute(query, query_data)
if fetch_all:
results = self.cursor.fetchall()
else:
results = self.cursor.fetchone()
return results
def query(self, query, query_data):
# example to call data.select_query("INSERT INTO test (num, data) VALUES (%s, %s)",(100, "abc'def"))
self.cursor.execute(query, query_data)
self.cursor.execute("""COMMIT;""")
def new_player(self, input_player):
        # Get all users from the database
all_users = self.select_query("""SELECT * FROM players""", True)
# get all categories from the database
all_categories = self.select_query("""SELECT * FROM categories""", True)
# Loop through all players
print("-----------------------------------------")
print(input_player)
print(turns.currentplayer.category)
print(all_users)
print(all_categories)
time.sleep(1)
        for user in all_users:
            # check if the player exists; if not, new data will be created
            # (names are stored lowercased, so compare against the lowercased input)
            if user[2] == input_player.lower():
                # Player already exists, do not create new data
                main.new_player = True
                break
            else:
                # Player does not exist, new data needs to be created
                main.new_player = False
# Loop through all categories
for category in all_categories:
# if the users category matches one out of the database
if category[1] == turns.currentplayer.category:
new_player_cat_id = category[0]
break
if not main.new_player:
print("inserting query")
# inserting new player in the database
self.query("""INSERT INTO players (wins, name, category) VALUES (%s, %s, %s)""", (0, input_player.lower(), new_player_cat_id))
def player_win(self):
        # add an extra point to the winner's score
if not main.player_won:
database_players = data.select_query("""SELECT * FROM players;""", True)
# Loop through all players
for player in database_players:
# if player matches the current player winning break the loop
if player[2] == turns.currentplayer.name:
win_player = player
break
# give the winning player an extra winning point
win_player_score = win_player[1] + 1
win_player_name = win_player[2]
win_player_id = win_player[0]
if turns.currentplayer.category == "Sports":
win_player_category = 1
elif turns.currentplayer.category == "Geography":
win_player_category = 2
elif turns.currentplayer.category == "Entertainment":
win_player_category = 3
elif turns.currentplayer.category == "History":
win_player_category = 4
data.query("""UPDATE players SET wins = (%s), category = (%s) WHERE name = (%s) AND id = (%s)""",
(win_player_score, win_player_category, win_player_name, win_player_id))
main.player_won = True
def __del__(self):
# Make the changes to the database
self.connection.commit()
# Close communication with the database
self.cursor.close()
self.connection.close()
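# Example usage of the Database helper (a minimal sketch, assuming a running
# PostgreSQL server with the `players` table queried above; the player name
# "alice" is made up for illustration):
#   data = Database()
#   rows = data.select_query("""SELECT * FROM players;""", True)
#   data.query("INSERT INTO players (wins, name, category) VALUES (%s, %s, %s)",
#              (0, "alice", 1))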
# make some colors
class color:
def __init__(self):
self.blue = (0, 85, 255)
self.black = (0, 0, 0)
self.white = (255, 255, 255)
self.red = (255, 0, 0)
self.green = (0, 255, 0)
self.yellow = (237, 245, 16)
self.grey = (128, 128, 128)
# class for the pagenumbers
class page:
def __init__(self, pagenumber):
self.pagenumber = pagenumber
# methods to change the page number
def setmainpage(self):
self.pagenumber = 0
print("mainpage")
def setinstructionpage(self):
self.pagenumber = 2
print("instructionpage")
def setinstructionpage2(self):
self.pagenumber = 18
print("instructionpage2")
def setinstructionpage3(self):
self.pagenumber = 17
print("instructionpage3")
def setquestionpage(self):
self.pagenumber = 4
print("questionpage")
def setsportquestionpage(self):
self.pagenumber = 5
print("sport_questionpage")
def sethistoryquestionpage(self):
self.pagenumber = 14
print("history_questionpage")
def setentertainmentquestionpage(self):
self.pagenumber = 13
print("entertainment_questionpage")
def setgeographyquestionpage(self):
self.pagenumber = 15
print("geography_questionpage")
def setanswercorrect(self):
self.pagenumber = 6
print("answerpage")
def setanswerincorrect(self):
self.pagenumber = 7
print("answerpage")
def setgamepage(self):
self.pagenumber = 1
print("gamepage")
def start(self):
self.pagenumber = 11
print("start")
def whostarts(self):
self.pagenumber = 3
print("whostarts")
def diceroll(self):
self.pagenumber = 12
print("diceroll")
def diceroll2(self):
self.pagenumber = 16
print("extra diceroll")
def highscore(self):
self.pagenumber = 40
print("Highscorepage")
def check(self):
if timer.number == 0:
self.pagenumber = 7
timer.number = 50
pygame.mixer.music.load("wrong.mp3")
pygame.mixer.music.play()
# class for a default font
class Font:
def __init__(self):
self.font_dynamix = "./Fonts/Dynamix.ttf"
self.font_orange_juice = "./Fonts/orangejuice.ttf"
self.font_skater = "./Fonts/Skater.ttf"
self.font_doctor_light = "./Fonts/Doctor_Light.ttf"
self.font_doctor_bold = "./Fonts/Doctor_Bold.ttf"
def settings(self, filename, size, text, color):
        # Font color is an RGB tuple
        # Determine the font size and font file;
        # pass None as the filename to use pygame's default font
font = pygame.font.Font(filename, size)
# put out the string with color
font_text = font.render(text, 1, color)
return font_text
Font = Font()
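# Example usage of Font.settings (a sketch; `screen` stands for any pygame
# display surface):
#   label = Font.settings(None, 30, "hello", (0, 0, 0))
#   screen.blit(label, (10, 10))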
# class with an empty string and a character counter; the values are reset on certain button presses
class Input:
def __init__(self):
self.string = ""
self.characters = 0
# class for clickable buttons (hover highlight only, no filled background)
class button:
def __init__(self, msg, x, y, w, h, ic, ac, action):
self.msg = msg
self.x = x
self.y = y
self.w = w
self.h = h
self.ic = ic
self.ac = ac
self.action = action
# get the state of the mouse
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
        # check if the state fits the requirements for a button press
        if self.x + self.w > mouse[0] > self.x and self.y + self.h > mouse[1] > self.y:
# change colour of the button while mouse on button
pygame.draw.rect(main.screen, self.ac, (self.x, self.y, self.w, self.h), 3)
# perform the actions
if click[0] == 1:
if self.action == "game":
page.setgamepage()
time.sleep(1)
print(page.pagenumber)
if self.action == "instruction":
page.setinstructionpage()
print(page.pagenumber)
if self.action == "quit":
                    pygame.quit()
if self.action == "start" and gamepage.playerschosen:
page.start()
names.n = 0
time.sleep(1)
print(page.pagenumber)
whostarts.categories = ["Sports", "Geography", "Entertainment", "History"]
if self.action == "start" and not gamepage.playerschosen:
print("select players please")
print(page.pagenumber)
if self.action == "main":
page.setmainpage()
main.player_won = False
gamepage.buttonstwo = True
print(gamepage.buttonstwo)
gamepage.buttonsthree = True
print(gamepage.buttonsthree)
gamepage.buttonsfour = True
print(gamepage.buttonsfour)
print(page.pagenumber)
if self.action == "whostarts":
page.whostarts()
print(page.pagenumber)
if self.action == "two":
gamepage.playerstwo()
gamepage.playerschosen = True
gamepage.buttonsthree = False
gamepage.buttonsfour = False
print(gamepage.players)
if self.action == "three":
gamepage.playersthree()
gamepage.playerschosen = True
gamepage.buttonstwo = False
gamepage.buttonsfour = False
print(gamepage.players)
if self.action == "four":
gamepage.playersfour()
gamepage.playerschosen = True
gamepage.buttonstwo = False
gamepage.buttonsthree = False
print(gamepage.players)
if self.action == "Sports":
turns.currentplayer.category = "Sports"
turns.currentplayer.xx = 367
turns.currentplayer.yy = 520
whostarts.categories.remove("Sports")
whostarts.chosen += 1
turns.next()
print(whostarts.categories)
print(turns.list)
print(turns.currentplayer.category)
print(turns.currentplayer.name)
print(main.listremove)
if self.action == "Geography":
turns.currentplayer.category = "Geography"
turns.currentplayer.xx = 430
turns.currentplayer.yy = 520
whostarts.categories.remove("Geography")
whostarts.chosen += 1
turns.next()
print(whostarts.categories)
print(turns.list)
print(turns.currentplayer.category)
print(turns.currentplayer.name)
print(main.listremove)
if self.action == "Entertainment":
turns.currentplayer.category = "Entertainment"
turns.currentplayer.xx = 241
turns.currentplayer.yy = 520
whostarts.categories.remove("Entertainment")
whostarts.chosen += 1
turns.next()
print(whostarts.categories)
print(turns.list)
print(turns.currentplayer.category)
print(turns.currentplayer.name)
print(main.listremove)
if self.action == "History":
turns.currentplayer.category = "History"
turns.currentplayer.xx = 556
turns.currentplayer.yy = 520
whostarts.categories.remove("History")
whostarts.chosen += 1
turns.next()
print(whostarts.categories)
print(turns.list)
print(turns.currentplayer.category)
print(turns.currentplayer.name)
print(main.listremove)
if self.action == "OK":
main.stop_loop = False
x = turns.currentplayer.category
if x == "Sports":
page.setsportquestionpage()
time.sleep(1)
elif x == "Geography":
page.setgeographyquestionpage()
time.sleep(1)
elif x == "Entertainment":
page.setentertainmentquestionpage()
time.sleep(1)
elif x == "History":
page.sethistoryquestionpage()
time.sleep(1)
if self.action == "questions":
page.setquestionpage()
if self.action == "correct_answer":
pygame.mixer.music.load("./Game_Audio/correct.mp3")
pygame.mixer.music.play()
page.setanswercorrect()
diceroll.update()
turns.currentplayer.correct = True
timer.number = 50
time.sleep(1)
if self.action == "wrong_answer":
pygame.mixer.music.load("./Game_Audio/wrong.mp3")
pygame.mixer.music.play()
page.setanswerincorrect()
timer.number = 50
time.sleep(1)
if self.action == "proceed":
print("proceed")
print(turns.currentplayer.name)
if turns.currentplayer.correct:
print(turns.currentplayer.xx, turns.currentplayer.yy)
turns.currentplayer.update()
print(turns.currentplayer.name)
print(turns.currentplayer.xx, turns.currentplayer.yy)
print(str(main.extra) + "hello??")
if main.extra:
page.diceroll2()
time.sleep(1)
if not main.extra:
page.whostarts()
time.sleep(1)
if turns.currentplayer.won:
page.pagenumber = 30
if self.action == "nextplayer":
turns.next()
time.sleep(0.2)
print(turns.currentplayer.name)
if self.action == "diceroll":
page.diceroll()
print(page.pagenumber)
if self.action == "up":
turns.currentplayer.direction = "up"
print(turns.currentplayer.direction)
if self.action == "down":
turns.currentplayer.direction = "down"
print(turns.currentplayer.direction)
if self.action == "left":
turns.currentplayer.direction = "left"
print(turns.currentplayer.direction)
if self.action == "right":
turns.currentplayer.direction = "right"
print(turns.currentplayer.direction)
if self.action == "up2":
diceroll2.movingplayer.direction = "up"
print(diceroll2.movingplayer.direction)
if self.action == "down2":
diceroll2.movingplayer.direction = "down"
print(diceroll2.movingplayer.direction)
if self.action == "left2":
diceroll2.movingplayer.direction = "left"
print(diceroll2.movingplayer.direction)
if self.action == "right2":
diceroll2.movingplayer.direction = "right"
print(diceroll2.movingplayer.direction)
if self.action == "proceed2":
print("playerdiced")
print(diceroll2.movingplayer.name)
print(turns.currentplayer.xx, turns.currentplayer.yy)
diceroll2.movingplayer.update2()
print(turns.currentplayer.xx, turns.currentplayer.yy)
main.extra = False
page.whostarts()
if self.action == "update":
time.sleep(0.1)
self.random = random.randint(1, 4)
if self.random == 1:
page.pagenumber = 22
elif self.random == 2:
page.pagenumber = 23
elif self.random == 3:
page.pagenumber = 21
elif self.random == 4:
page.pagenumber = 24
print(page.pagenumber)
if self.action == "settings":
page.pagenumber = 31
if self.action == "asknames":
page.pagenumber = 32
print(page.pagenumber)
time.sleep(1)
if self.action == "highscores":
page.highscore()
if self.action == "sound0":
main.sound = True
print("sound on")
if self.action == "sound1":
main.sound = False
print("sound off")
if self.action == "soundtest":
if main.sound:
pygame.mixer.music.load("./Game_Audio/wrong.mp3")
pygame.mixer.music.play()
time.sleep(1)
print("playing sound")
if self.action == "changeplayer":
names.n += 1
turns.currentplayer.name = Input.string
turns.next()
Input.string = ""
Input.characters = 0
time.sleep(0.2)
else:
pass
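# A standalone sketch of the rectangle hit-test both button classes rely on
# (an illustration only; the classes inline this check themselves):
def _point_in_rect(px, py, x, y, w, h):
    # True when the point (px, py) lies strictly inside the rectangle with
    # top-left corner (x, y), width w and height h.
    return x < px < x + w and y < py < y + h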
# class for clickable buttons that also draw their own filled background
class button_draw:
def __init__(self, msg, x, y, w, h, ic, ac, action):
self.msg = msg
self.x = x
self.y = y
self.w = w
self.h = h
self.ic = ic
self.ac = ac
self.action = action
self.random = 0
# get the state of the mouse
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
        # check if the state fits the requirements for a button press
        if self.x + self.w > mouse[0] > self.x and self.y + self.h > mouse[1] > self.y:
# change colour of the button while mouse on button
pygame.draw.rect(main.screen, self.ic, (self.x, self.y, self.w, self.h))
pygame.draw.rect(main.screen, self.ac, (self.x, self.y, self.w, self.h), 3)
# perform the actions
if click[0] == 1:
if self.action == "game":
page.setgamepage()
time.sleep(1)
print(page.pagenumber)
if self.action == "instruction":
page.setinstructionpage()
print(page.pagenumber)
if self.action == "quit":
                    pygame.quit()
if self.action == "start" and gamepage.playerschosen:
page.start()
time.sleep(1)
print(page.pagenumber)
if self.action == "start" and not gamepage.playerschosen:
print("select players please")
print(page.pagenumber)
if self.action == "main":
page.setmainpage()
gamepage.buttonstwo = True
gamepage.buttonsthree = True
gamepage.buttonsfour = True
print(page.pagenumber)
if self.action == "whostarts":
names.n = 0
page.whostarts()
print(page.pagenumber)
if self.action == "two":
gamepage.playerstwo()
gamepage.playerschosen = True
gamepage.buttonsthree = False
gamepage.buttonsfour = False
print(gamepage.players)
if self.action == "three":
gamepage.playersthree()
gamepage.playerschosen = True
gamepage.buttonstwo = False
gamepage.buttonsfour = False
print(gamepage.players)
if self.action == "four":
gamepage.playersfour()
gamepage.playerschosen = True
gamepage.buttonstwo = False
gamepage.buttonsthree = False
print(gamepage.players)
if self.action == "Sports":
turns.currentplayer.category = "Sports"
turns.currentplayer.xx = 367
turns.currentplayer.yy = 518
whostarts.categories.remove("Sports")
whostarts.chosen += 1
turns.next()
print(whostarts.categories)
print(turns.list)
print(turns.currentplayer.category)
print(turns.currentplayer.name)
print(main.listremove)
if self.action == "Geography":
turns.currentplayer.category = "Geography"
turns.currentplayer.xx = 430
turns.currentplayer.yy = 518
whostarts.categories.remove("Geography")
whostarts.chosen += 1
turns.next()
print(whostarts.categories)
print(turns.list)
print(turns.currentplayer.category)
print(turns.currentplayer.name)
print(main.listremove)
if self.action == "Entertainment":
turns.currentplayer.category = "Entertainment"
turns.currentplayer.xx = 241
turns.currentplayer.yy = 518
whostarts.categories.remove("Entertainment")
whostarts.chosen += 1
turns.next()
print(whostarts.categories)
print(turns.list)
print(turns.currentplayer.category)
print(turns.currentplayer.name)
print(main.listremove)
if self.action == "History":
turns.currentplayer.category = "History"
turns.currentplayer.xx = 556
turns.currentplayer.yy = 518
whostarts.categories.remove("History")
whostarts.chosen += 1
turns.next()
print(whostarts.categories)
print(turns.list)
print(turns.currentplayer.category)
print(turns.currentplayer.name)
print(main.listremove)
if self.action == "OK":
main.stop_loop = False
x = turns.currentplayer.category
if x == "Sports":
page.setsportquestionpage()
time.sleep(1)
elif x == "Geography":
page.setgeographyquestionpage()
time.sleep(1)
elif x == "Entertainment":
page.setentertainmentquestionpage()
time.sleep(1)
elif x == "History":
page.sethistoryquestionpage()
time.sleep(1)
if self.action == "cat_sports":
page.setsportquestionpage()
time.sleep(1)
if self.action == "questions":
page.setquestionpage()
if self.action == "correct_answer":
pygame.mixer.music.load("./Game_Audio/correct.mp3")
pygame.mixer.music.play()
page.setanswercorrect()
diceroll.update()
turns.currentplayer.correct = True
timer.number = 50
time.sleep(1)
if self.action == "wrong_answer":
pygame.mixer.music.load("./Game_Audio/wrong.mp3")
pygame.mixer.music.play()
page.setanswerincorrect()
timer.number = 50
time.sleep(1)
if self.action == "proceed":
diceroll.showbuttons = False
print("proceed")
print(turns.currentplayer.name)
print(turns.currentplayer.correct)
if turns.currentplayer.correct:
print(turns.currentplayer.xx, turns.currentplayer.yy)
turns.currentplayer.update()
turns.currentplayer.correct = False
print(turns.currentplayer.name)
print(turns.currentplayer.xx, turns.currentplayer.yy)
print(str(main.extra) + "hello??")
if main.extra:
page.diceroll2()
time.sleep(1)
if not main.extra:
page.whostarts()
time.sleep(0.2)
if turns.currentplayer.won:
page.pagenumber = 30
if self.action == "nextplayer":
turns.next()
time.sleep(0.2)
print(turns.currentplayer.name)
if self.action == "diceroll":
page.diceroll()
print(page.pagenumber)
if self.action == "up":
turns.currentplayer.direction = "up"
print(turns.currentplayer.direction)
if self.action == "down":
turns.currentplayer.direction = "down"
print(turns.currentplayer.direction)
if self.action == "left":
turns.currentplayer.direction = "left"
print(turns.currentplayer.direction)
if self.action == "right":
turns.currentplayer.direction = "right"
print(turns.currentplayer.direction)
if self.action == "up2":
diceroll2.movingplayer.direction = "up"
print(diceroll2.movingplayer.direction)
if self.action == "down2":
diceroll2.movingplayer.direction = "down"
print(diceroll2.movingplayer.direction)
if self.action == "left2":
diceroll2.movingplayer.direction = "left"
print(diceroll2.movingplayer.direction)
if self.action == "right2":
diceroll2.movingplayer.direction = "right"
print(diceroll2.movingplayer.direction)
if self.action == "proceed2":
print("playerdiced")
print(diceroll2.movingplayer.name)
print(turns.currentplayer.xx, turns.currentplayer.yy)
diceroll2.movingplayer.update()
print(turns.currentplayer.xx, turns.currentplayer.yy)
main.extra = False
page.whostarts()
if self.action == "update":
time.sleep(0.1)
self.random = random.randint(1, 4)
if self.random == 1:
page.pagenumber = 22
elif self.random == 2:
page.pagenumber = 23
elif self.random == 3:
page.pagenumber = 21
elif self.random == 4:
page.pagenumber = 24
print(page.pagenumber)
if self.action == "sound0":
main.sound = True
print("sound on")
if self.action == "sound1":
main.sound = False
print("sound off")
if self.action == "soundtest":
if main.sound:
pygame.mixer.music.load("./Game_Audio/wrong.mp3")
pygame.mixer.music.play()
time.sleep(1)
print("playing sound")
if self.action == "asknames":
page.pagenumber = 32
print(page.pagenumber)
time.sleep(0.2)
if self.action == "changeplayer":
turns.currentplayer.name = Input.string
turns.next()
data.new_player(Input.string)
Input.string = ""
Input.characters = 0
time.sleep(0.2)
else:
pygame.draw.rect(main.screen, self.ic, (self.x, self.y, self.w, self.h))
# create a player class
class player:
def __init__(self, name, color, x, y, xx, yy, id):
        # single coordinates for the name position, double for the image position
        # newx and newy hold the animation destination
        # wins is the value inserted into the database
        # category starts empty so whostarts can fill it in
        # correct records whether the answer was correct or not;
        # this decides whether to go to the right or wrong answer page
        # direction is empty and gains a value once a direction button is pressed in diceroll
        # id identifies which player we are, so we can check collisions
        # won is used to end the game
        # proceed: if the player has not won, proceed with checking positions
self.x = x
self.xx = xx
self.yy = yy
self.y = y
self.newx = xx
self.newy = yy
self.name = name
self.color = color
self.wins = 0
self.category = ""
self.correct = False
self.direction = ""
self.id = id
self.won = False
self.proceed = True
self.skip = True
def update(self):
        # update moves the players: newx and newy are set here, and in draw()
        # we move self.xx and self.yy towards self.newx and self.newy
print(diceroll.n)
print(self.direction)
# check directions and dicerolls and set newx/newy accordingly
if self.direction == "up":
if diceroll.n == 1 or diceroll.n == 2:
self.newy = self.yy - 32
elif diceroll.n == 3 or diceroll.n == 4:
self.newy = self.yy - 67
else:
self.newy = self.yy - 99
print("up")
elif self.direction == "down":
if diceroll.n == 1 or diceroll.n == 2:
self.newy = self.yy + 32
elif diceroll.n == 3 or diceroll.n == 4:
self.newy = self.yy + 67
else:
self.newy = self.yy + 99
print("down")
elif self.direction == "left":
if diceroll.n == 1 or diceroll.n == 2:
self.newx = self.xx - 63
elif diceroll.n == 3 or diceroll.n == 4:
self.newx = self.xx - 125
else:
self.newx = self.xx - 190
print("left")
elif self.direction == "right":
if diceroll.n == 1 or diceroll.n == 2:
self.newx = self.xx + 63
elif diceroll.n == 3 or diceroll.n == 4:
self.newx = self.xx + 125
else:
self.newx = self.xx + 190
print("right")
def update2(self):
# this update function is for extra dicerolls
# the players are instantly set to their new position without animation.
print("been here")
# check directions and dicerolls and move accordingly
if diceroll2.movingplayer.direction == "up":
if diceroll.n == 1 or diceroll.n == 2:
diceroll2.movingplayer.yy -= 32
diceroll2.movingplayer.newy = diceroll2.movingplayer.yy
elif diceroll.n == 3 or diceroll.n == 4:
diceroll2.movingplayer.yy -= 65
diceroll2.movingplayer.newy = diceroll2.movingplayer.yy
else:
diceroll2.movingplayer.yy -= 99
diceroll2.movingplayer.newy = diceroll2.movingplayer.yy
print("up")
elif diceroll2.movingplayer.direction == "down":
if diceroll.n == 1 or diceroll.n == 2:
diceroll2.movingplayer.yy += 32
diceroll2.movingplayer.newy = diceroll2.movingplayer.yy
elif diceroll.n == 3 or diceroll.n == 4:
diceroll2.movingplayer.yy += 65
diceroll2.movingplayer.newy = diceroll2.movingplayer.yy
else:
diceroll2.movingplayer.yy += 99
diceroll2.movingplayer.newy = diceroll2.movingplayer.yy
print("down")
elif diceroll2.movingplayer.direction == "left":
if diceroll.n == 1 or diceroll.n == 2:
diceroll2.movingplayer.xx -= 63
diceroll2.movingplayer.newx = diceroll2.movingplayer.xx
elif diceroll.n == 3 or diceroll.n == 4:
diceroll2.movingplayer.xx -= 125
diceroll2.movingplayer.newx = diceroll2.movingplayer.xx
else:
diceroll2.movingplayer.xx -= 190
diceroll2.movingplayer.newx = diceroll2.movingplayer.xx
print("left")
elif diceroll2.movingplayer.direction == "right":
if diceroll.n == 1 or diceroll.n == 2:
diceroll2.movingplayer.xx += 63
diceroll2.movingplayer.newx = diceroll2.movingplayer.xx
elif diceroll.n == 3 or diceroll.n == 4:
diceroll2.movingplayer.xx += 125
diceroll2.movingplayer.newx = diceroll2.movingplayer.xx
else:
diceroll2.movingplayer.xx += 190
diceroll2.movingplayer.newx = diceroll2.movingplayer.xx
print("right")
def check(self):
        # this checks for any movement outside the field and any collisions with other players.
        # if in the lower box, check the current position; if the position moves into the upper box,
        # adjust self.xx accordingly. then set self.skip to False so we won't check this anymore;
        # if we move back to the lower box, self.skip becomes True again
if self.yy > 205:
self.skip = True
# check if positions did not pass the borders
# here we check for the winning state
# if we won self.proceed is false. we will not check anything else anymore
if self.direction == "up":
# check if player won
if self.yy < 38:
self.xx = 400
self.newx = 400
self.newy = 20
self.yy = 20
print("you won")
self.won = True
print(self.won)
pygame.mixer.music.load("./Game_Audio/victory.mp3")
pygame.mixer.music.play()
time.sleep(1)
page.pagenumber = 30
self.proceed = False
if self.proceed:
if self.skip:
                # 1st/2nd/7th/8th row: check if too high and adjust accordingly.
                # after adjustment skip = False, so we no longer check this block
                # until we move down to the lower block, where skip = True again
                # (skip is a poorly chosen name: we don't skip when True and skip when False)
if 150 < self.xx < 270:
if 100 < self.yy < 205:
print("corrected self.xx, newx(left side)")
self.xx = 305
self.newx = 305
self.skip = False
elif 530 < self.xx < 650:
if 50 < self.yy < 205:
self.xx = 495
self.newx = 495
print("corrected self.xx,newx(right side)")
self.skip = False
                # middle bottom block: check if at the middle border
# if we are at the middle borders, we check if we go into the upper block.
# skip same as above
elif 270 < self.xx < 400:
if 50 < self.yy < 205:
self.xx = 368
self.newx = 368
print("corrected middle self.xx,newx(blue)")
self.skip = False
elif 400 < self.xx < 530:
if 50 < self.yy < 205:
self.xx = 430
self.newx = 430
print("corrected middle self.xx,newx(green)")
self.skip = False
if self.proceed:
# this is for the lower block
if self.direction == "left":
# left row check if not too far
# if too far reset self.xx and self.newx
if self.xx < 150:
self.xx = 178
self.newx = 178
print("corrected left")
if self.direction == "right":
# right row check if not too far
# if too far reset self.xx and self.newx
if self.xx > 650:
self.xx = 620
self.newx = 620
print("corrected right")
# this is for the upper block
if self.direction == "right" or self.direction == "left":
# 3rd 4th 5th 6th row if not too far left/right
# if too far, reset xpos
if 200 > self.yy > 38:
if self.xx < 275:
self.xx = 300
self.newx = 300
if self.xx > 525:
self.xx = 495
self.newx = 495
print("corrected mid left/right")
# this is for lower border
if self.direction == "down":
if self.yy > 530:
self.yy = 518
self.newy = 518
print("corrected downline")
        # set self.correct to False so it doesn't stay True going into the next question
self.correct = False
        # check position and change category accordingly
if self.yy > 205:
if 150 < self.xx < 270:
self.category = "Entertainment"
elif 270 < self.xx < 400:
self.category = "Sports"
elif 400 < self.xx < 525:
self.category = "Geography"
elif 525 < self.xx < 650:
self.category = "History"
elif self.yy < 205:
if 270 < self.xx < 335:
self.category = "Entertainment"
elif 335 < self.xx < 400:
self.category = "Sports"
elif 400 < self.xx < 460:
self.category = "Geography"
elif 460 < self.xx < 525:
self.category = "History"
# check for collisions with other players
# if player1 just moved:
# check if he had collisions with any other player
# if so, send that player to diceroll2 and let him move.
if self.id == 1:
print("my id is 1")
# if player 2 has a collision with player1
if self.xx - 10 < player2.xx < self.xx + 10 and \
self.yy - 10 < player2.yy < self.yy + 10:
print("ive been here")
# the diceroll 2 page movingplayer is set to the player that was collided with
diceroll2.movingplayer = player2
print(diceroll2.movingplayer.name)
# the diceroll 2 page is updated
diceroll2.update()
print("updated")
# the diceroll 2 page is drawn
page.diceroll2()
print(page.pagenumber)
print("page = diceroll2?")
# set extra to True
main.extra = True
# if player 3 has a collision with player1
elif self.xx - 10 < player3.xx < self.xx + 10 and \
self.yy - 10 < player3.yy < self.yy + 10:
print("ive been here")
# the diceroll 2 page movingplayer is set to the player that was collided with
diceroll2.movingplayer = player3
print(diceroll2.movingplayer.name)
# the diceroll 2 page is updated
diceroll2.update()
print("updated")
# the diceroll 2 page is drawn
page.diceroll2()
print(page.pagenumber)
print("page = diceroll2?")
main.extra = True
# if player 4 has a collision with player1
elif self.xx - 10 < player4.xx < self.xx + 10 and \
self.yy - 10 < player4.yy < self.yy + 10:
print("ive been here")
# the diceroll 2 page movingplayer is set to the player that was collided with
diceroll2.movingplayer = player4
print(diceroll2.movingplayer.name)
# the diceroll 2 page is updated
diceroll2.update()
print("updated")
# the diceroll 2 page is drawn
page.diceroll2()
print(page.pagenumber)
main.extra = True
print("page = diceroll2?")
# if player2 just moved
elif self.id == 2:
            # if player 1 has a collision with player 2
if self.xx - 10 < player1.xx < self.xx + 10 and \
self.yy - 10 < player1.yy < self.yy + 10:
print("ive been here")
# the diceroll 2 page movingplayer is set to the player that was collided with
diceroll2.movingplayer = player1
print(diceroll2.movingplayer.name)
# the diceroll 2 page is updated
diceroll2.update()
print("updated")
# the diceroll 2 page is drawn
page.diceroll2()
main.extra = True
print(page.pagenumber)
print("page = diceroll2?")
            # if player 3 has a collision with player 2
elif self.xx - 10 < player3.xx < self.xx + 10 and \
self.yy - 10 < player3.yy < self.yy + 10:
print("ive been here")
# the diceroll 2 page movingplayer is set to the player that was collided with
diceroll2.movingplayer = player3
print(diceroll2.movingplayer.name)
# the diceroll 2 page is updated
diceroll2.update()
print("updated")
# the diceroll 2 page is drawn
page.diceroll2()
main.extra = True
print(page.pagenumber)
print("page = diceroll2?")
            # if player 4 has a collision with player 2
elif self.xx - 10 < player4.xx < self.xx + 10 and \
self.yy - 10 < player4.yy < self.yy + 10:
print("ive been here")
# the diceroll 2 page movingplayer is set to the player that was collided with
diceroll2.movingplayer = player4
print(diceroll2.movingplayer.name)
# the diceroll 2 page is updated
diceroll2.update()
print("updated")
# the diceroll 2 page is drawn
page.diceroll2()
main.extra = True
print(page.pagenumber)
print("page = diceroll2?")
elif self.id == 3:
            # if player 1 has a collision with player 3
if self.xx - 10 < player1.xx < self.xx + 10 and \
self.yy - 10 < player1.yy < self.yy + 10:
print("ive been here")
# the diceroll 2 page movingplayer is set to the player that was collided with
diceroll2.movingplayer = player1
print(diceroll2.movingplayer.name)
# the diceroll 2 page is updated
diceroll2.update()
print("updated")
# the diceroll 2 page is drawn
page.diceroll2()
main.extra = True
print(page.pagenumber)
print("page = diceroll2?")
            # if player 2 has a collision with player 3
elif self.xx - 10 < player2.xx < self.xx + 10 and \
self.yy - 10 < player2.yy < self.yy + 10:
print("ive been here")
# the diceroll 2 page movingplayer is set to the player that was collided with
diceroll2.movingplayer = player2
print(diceroll2.movingplayer.name)
# the diceroll 2 page is updated
diceroll2.update()
print("updated")
# the diceroll 2 page is drawn
page.diceroll2()
main.extra = True
print(page.pagenumber)
print("page = diceroll2?")
            # if player 4 has a collision with player 3
elif self.xx - 10 < player4.xx < self.xx + 10 and \
self.yy - 10 < player4.yy < self.yy + 10:
print("ive been here")
# the diceroll 2 page movingplayer is set to the player that was collided with
diceroll2.movingplayer = player4
print(diceroll2.movingplayer.name)
# the diceroll 2 page is updated
diceroll2.update()
print("updated")
# the diceroll 2 page is drawn
page.diceroll2()
main.extra = True
print(page.pagenumber)
print("page = diceroll2?")
elif self.id == 4:
            # if player 1 has a collision with player 4
if self.xx - 10 < player1.xx < self.xx + 10 and \
self.yy - 10 < player1.yy < self.yy + 10:
print("ive been here")
# the diceroll 2 page movingplayer is set to the player that was collided with
diceroll2.movingplayer = player1
print(diceroll2.movingplayer.name)
# the diceroll 2 page is updated
diceroll2.update()
print("updated")
# the diceroll 2 page is drawn
page.diceroll2()
main.extra = True
print(page.pagenumber)
print("page = diceroll2?")
            # if player 3 has a collision with player 4
elif self.xx - 10 < player3.xx < self.xx + 10 and \
self.yy - 10 < player3.yy < self.yy + 10:
print("ive been here")
# the diceroll 2 page movingplayer is set to the player that was collided with
diceroll2.movingplayer = player3
print(diceroll2.movingplayer.name)
# the diceroll 2 page is updated
diceroll2.update()
print("updated")
# the diceroll 2 page is drawn
page.diceroll2()
main.extra = True
print(page.pagenumber)
print("page = diceroll2?")
            # if player 2 has a collision with player 4
elif self.xx - 10 < player2.xx < self.xx + 10 and \
self.yy - 10 < player2.yy < self.yy + 10:
print("ive been here")
# the diceroll 2 page movingplayer is set to the player that was collided with
diceroll2.movingplayer = player2
print(diceroll2.movingplayer.name)
# the diceroll 2 page is updated
diceroll2.update()
print("updated")
# the diceroll 2 page is drawn
page.diceroll2()
main.extra = True
print(page.pagenumber)
print("page = diceroll2?")
def draw(self, screen):
# This is the playername and playerscore in the sidebar of the game
playername = Font.settings(None, 30, self.name, self.color)
playerscore = Font.settings(None, 30, str(self.wins), color.black)
        # These are the players being drawn. they move 1 pixel per tick, with
        # a 10 ms sleep per pixel, so roughly 100 pixels per second at most.
        # they stop moving when they reach their newx/newy;
        # after that they check if they went out of the map or had a collision,
        # then the buttons are drawn so you can go to the next question/player
if self.direction == "up":
if self.yy > self.newy:
self.yy -= 1
time.sleep(0.01)
startgame.drawbuttons = False
elif self.yy == self.newy:
startgame.drawbuttons = True
self.check()
print("checked")
if self.direction == "down":
if self.yy < self.newy:
self.yy += 1
time.sleep(0.01)
startgame.drawbuttons = False
elif self.yy == self.newy:
startgame.drawbuttons = True
self.check()
print("checked")
if self.direction == "left":
if self.xx > self.newx:
self.xx -= 1
time.sleep(0.01)
startgame.drawbuttons = False
self.check()
elif self.xx == self.newx:
startgame.drawbuttons = True
if self.direction == "right":
if self.xx < self.newx:
self.xx += 1
time.sleep(0.01)
startgame.drawbuttons = False
self.check()
elif self.xx == self.newx:
startgame.drawbuttons = True
# actually draw the players
pygame.draw.circle(screen, self.color, (self.xx, self.yy), 10)
# actually draw the playername/score
screen.blit(playername, (self.x, self.y))
screen.blit(playerscore, (self.x, (self.y + 20)))
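# A compact sketch of the step sizes player.update() applies per dice roll
# (the board squares are wider than they are tall, hence the different steps):
#   dice 1-2 -> 1 square:  63 px horizontally, 32 px vertically
#   dice 3-4 -> 2 squares: 125 px horizontally, 67 px vertically
#   dice 5-6 -> 3 squares: 190 px horizontally, 99 px vertically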
# class for the default screen
class mainpage:
def draw(self, screen):
# draw background
startimg = pygame.image.load("./Design_Afbeeldingen/startscherm.png")
screen.blit(startimg, (0, 0))
# draw start button, if pressed move to the next screen
button("start", 269, 158, 265, 72, None, color.green, "game")
start_button_font = Font.settings(None, 40, "", color.black)
# draw quit button, if pressed quit the game
button("quit", 269, 485, 265, 72, None, color.green, "quit")
quit_button_font = Font.settings(None, 40, "", color.black)
# draw instructions button, if pressed move to instructionpage
button("instructions", 269, 321, 265, 72, None, color.green, "instruction")
        instructions_button_font = Font.settings(None, 40, "", color.black)
# draw Highscores button, if pressed go to highscore page
button("highscores", 269, 239, 265, 72, None, color.green, "highscores")
# draw settings button, if pressed move to settings page
button("settings", 269, 403, 265, 72, None, color.green, "settings")
# blit the button fonts.
screen.blit(start_button_font, (370, 247))
screen.blit(quit_button_font, (370, 430))
        screen.blit(instructions_button_font, (370, 340))
# class for the instruction screen
class instructionpage:
def draw(self, screen):
        # draw the instruction text (stored in an image because of pygame's limited text rendering)
instructionimg = pygame.image.load("./Design_Afbeeldingen/instructions.png")
screen.blit(instructionimg, (0, 0))
# draw the back button, if pressed go back to start page
button("back", 16, 545, 90, 40, color.green, color.green, "main")
class timer:
def __init__(self):
        # self.number = 50 so the timer starts at 50
        # self.tick = 0 and counts up every tick of the game (15 per second)
        # when tick % 15 == 0 (i.e. once every 15 ticks, so once a second)
        # the timer counts down by one
self.number = 50
self.tick = 0
def update(self):
self.tick += 1
if self.tick % 15 == 0:
self.number -= 1
timer = timer()
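# Sketch of the countdown arithmetic (assuming the game loop calls
# timer.update() about 15 times per second, as the comments above state):
#   15 ticks -> number drops by 1 -> roughly one real second,
#   so the full 50-unit timer lasts about 50 * 15 = 750 ticks (~50 s).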
class Questionpage:
def __init__(self, screen):
# self.categories = ["Sports"]
self.categories = ["Sports", "Geography", "Entertainment", "History"]
self.choose = random.choice(self.categories)
        # Create empty lists.
self.answers = []
self.action = []
self.button_list = []
self.screen = screen
        # Index of the question to show; the question lists themselves are
        # shuffled before use, so index 0 is effectively a random pick.
        self.pick_question = 0
self.question = ""
self.question_font = None
self.correct_answer = None
self.answer_button_font_1 = ""
self.answer_button_font_2 = ""
self.answer_button_font_3 = ""
self.number = random.randint(0, 15)
def draw_category(self, screen):
# set the background
your_category_BG = pygame.image.load("./Design_Afbeeldingen/background.png")
screen.blit(your_category_BG, (0, 0))
# create question font
question = "Your category is: " + turns.currentplayer.category
question_font = Font.settings(Font.font_doctor_bold, 45, question, color.black)
        # Create OK button
button_draw("OK", 318, 263, 160, 72, color.grey, color.green, "OK")
ok_font = Font.settings(Font.font_orange_juice, 40, "OK", color.black)
# blit that shit
screen.blit(question_font, (150, 150))
screen.blit(ok_font, (375, 285))
def draw_questions(self, category, color_category, question, ans_1, ans_2, ans_3):
category = category
categorie_font = Font.settings(Font.font_skater, 40, category, color_category)
if not main.stop_loop:
main.stop_loop = True
# Create question.
self.question = question
self.question_line1 = self.question[0]
self.question_line2 = self.question[1]
self.question_font1 = Font.settings(Font.font_doctor_light, 33, self.question_line1, color.black)
self.question_font2 = Font.settings(Font.font_doctor_light, 33, self.question_line2, color.black)
            # Double check whether there are only two answers.
            # Create a mapping of answers for the button loop, in random order
if ans_3[0] == "" and ans_3[1] == "":
self.button_list = {0: (ans_1[0], ans_1[1]),
1: (ans_2[0], ans_2[1])}
else:
self.button_list = {0: (ans_1[0], ans_1[1]),
1: (ans_2[0], ans_2[1]),
2: (ans_3[0], ans_3[1])}
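            # shuffling the dict works because its keys are 0..n-1, so it
            # supports len() and integer indexing just like a mutable sequence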
random.shuffle(self.button_list)
how_many = len(self.button_list)
# Create buttons for the answers
if how_many == 3:
button_draw(self.button_list[0][0], 150, 240, 500, 72, color.grey, color.green, self.button_list[0][1])
self.answer_button_font_1 = Font.settings(None, 30, "A. " + self.button_list[0][0], color.white)
button_draw(self.button_list[1][0], 150, 340, 500, 72, color.grey, color.green, self.button_list[1][1])
self.answer_button_font_2 = Font.settings(None, 30, "B. " + self.button_list[1][0], color.white)
button_draw(self.button_list[2][0], 150, 440, 500, 72, color.grey, color.green, self.button_list[2][1])
self.answer_button_font_3 = Font.settings(None, 30, "C. " + self.button_list[2][0], color.white)
elif how_many == 2:
button_draw(self.button_list[0][0], 150, 240, 500, 72, color.grey, color.green, self.button_list[0][1])
self.answer_button_font_1 = Font.settings(None, 30, "A. " + self.button_list[0][0], color.white)
button_draw(self.button_list[1][0], 150, 340, 500, 72, color.grey, color.green, self.button_list[1][1])
self.answer_button_font_2 = Font.settings(None, 30, "B. " + self.button_list[1][0], color.white)
# blit that shit
self.screen.blit(self.question_font1, (30, 110))
self.screen.blit(self.question_font2, (30, 150))
self.screen.blit(categorie_font, (330, 50))
self.screen.blit(self.answer_button_font_1, (160, 270))
self.screen.blit(self.answer_button_font_2, (160, 370))
# print the timer on the screen
timerstr = Font.settings(None, 30, str(timer.number), color.white)
self.screen.blit(timerstr, (10, 10))
timer.update()
if how_many == 3:
self.screen.blit(self.answer_button_font_3, (160, 470))
pygame.display.update()
# check if the time has run out
page.check()
def draw_sports(self, screen):
questions_sports_BG = pygame.image.load("./Design_Afbeeldingen/background.png")
screen.blit(questions_sports_BG, (0, 0))
# Put all the questions in place
questions = [
[("Welke manier van sport word ", "het meest beoefend in Rotterdam?"),
("Fitness", "correct_answer"),
("Voetbal", "wrong_answer"),
("Basketball", "wrong_answer")],
[("Hoe heet het centrum voor sport naast de kuip?", ""),
("Schuttersveld", "wrong_answer"),
("Topsportcentrum Rotterdam", "correct_answer"),
("Sportcentrum de Wilgenring", "wrong_answer")],
[("In welk jaar startte de Tour de France in Rotterdam?", ""),
("2000", "wrong_answer"),
("2005", "wrong_answer"),
("2010", "correct_answer")],
[("Welk tennistoernooi word er elk jaar in Ahoy gehouden?", ""),
("ABN AMRO World Tennis Tournament", "correct_answer"),
("Ahoy Open", "wrong_answer"),
("Heineken Open", "wrong_answer")],
[("Wat is een hockeyclub uit Rotterdam?", ""),
("HVGR", "wrong_answer"),
("Focus", "wrong_answer"),
("HC Rotterdam", "correct_answer")],
[("Welke Olympiër groeide op in Rotterdam?", ""),
("Dorian van Rijsselberghe", "wrong_answer"),
("Marhinde Verkerk", "correct_answer"),
("Edith Bosch", "wrong_answer")],
[("Op welke baan vond het WK roeien in 2016 plaats?", ""),
("Willem Alexander baan", "correct_answer"),
("Beatrix baan", "wrong_answer"),
("Juliana baan", "wrong_answer")],
[("Voor welke 3 sporten is de", "Willem Alexander baan het meest geschikt?"),
("Watersporten, Wielrennen en hardlopen", "correct_answer"),
("Voetbal, Hockey en basketbal", "wrong_answer"),
("Fitness, hardlopen en basketbal", "wrong_answer")],
[("Op welke positie in het veld speelde", "Coen Moulijn voor zowel Feyenoord als het Nederlands elftal?"),
("Rechtsback", "wrong_answer"),
("Linksback", "wrong_answer"),
("Linksbuiten", "correct_answer")],
[("Hoe heet het stadion van Sparta Rotterdam?", ""),
("De Toren", "wrong_answer"),
("Het Kasteel", "correct_answer"),
("De Arena", "wrong_answer")],
[("Hoe lang is de NN Marathon van Rotterdam?", ""),
("42,125 km", "correct_answer"),
("42,450 km", "wrong_answer"),
("42,680 km", "wrong_answer")],
[("Hoeveel spelers staan er", "per team bij lacrosse op het veld?"),
("9", "wrong_answer"),
("10", "correct_answer"),
("11", "wrong_answer")],
[("In welk jaar is de honkbalclub Neptunes opgericht?", ""),
("1850", "wrong_answer"),
("1875", "wrong_answer"),
("1900", "correct_answer")],
[("Een honkbal is groter dan een softbal.", ""),
("Waar", "wrong_answer"),
("Niet Waar", "correct_answer"),
("Even Groot", "wrong_answer")],
[("Hoeveel mensen staan er", "achter de slagman bij honkbal?"),
("1", "wrong_answer"),
("2", "correct_answer"),
("3", "wrong_answer")]
]
# call in the questions
random.shuffle(questions)
if len(questions[0]) == 4:
self.draw_questions("Sports", color.blue,
questions[0][0],
questions[0][1],
questions[0][2],
questions[0][3])
else:
self.draw_questions("Sports", color.blue,
questions[0][0],
questions[0][1],
questions[0][2],
("", ""))
def draw_geography(self, screen):
questions_geography_BG = pygame.image.load("./Design_Afbeeldingen/background.png")
screen.blit(questions_geography_BG, (0, 0))
questions = [
[("Welke brug in Rotterdam heeft de volgende bijnaam: De zwaan.", ""),
("De Willemsbrug", "wrong_answer"),
("De Erasmusbrug", "correct_answer"),
("De van Briennenoordbrug", "wrong_answer")],
[("Rotterdam is de hoofdstad van Nederland.", ""),
("Waar", "wrong_answer"),
("Niet Waar", "correct_answer")],
[("Rotterdam is de hoofdstad van Zuid-Holland.", ""),
("Waar", ""),
("Niet Waar", "correct_answer")],
[("Rotterdam is de grootste stad van Nederland.", ""),
("Waar", "correct_answer"),
("Niet Waar", "wrong_answer")],
[("De haven van Rotterdam is de grootste haven van Nederland.", ""),
("Waar", "correct_answer"),
("Niet Waar", "wrong_answer")],
[("Wat is het belangrijkste vervoersmiddel in Rotterdam?.", ""),
("Metro", "correct_answer"),
("Auto", "wrong_answer"),
("Fiets", "wrong_answer")],
[("Hoeveel millimeter regen valt er", "gemiddeld per jaar in Rotterdam?"),
("760 tot 780mm", "wrong_answer"),
("780 tot 800mm", "wrong_answer"),
("800 tot 820mm", "correct_answer")],
[("Hoeveel woningen zijn er ongeveer in Rotterdam?", ""),
("150.000", "wrong_answer"),
("300.000", "wrong_answer"),
("450.000", "correct_answer")],
[("Wat is het oudste gebouw van Rotterdam?.", ""),
("Kerktoren hillegondakerk", "correct_answer"),
("St. Laurenskerk.", "wrong_answer"),
("Stadhuis van Rotterdam", "wrong_answer")],
[("Hoeveel mensen maken dagelijks gebruik", "van het openbaar vervoer in Rotterdam?"),
("800.000", "wrong_answer"),
("900.000", "wrong_answer"),
("1.000.000", "correct_answer")],
[("Wat is de oudste brug van Rotterdam?", ""),
("De Willemsbrug", "wrong_answer"),
("De Koninginnebrug", "correct_answer"),
("De van Briennenoordbrug", "wrong_answer")],
[("Rotterdam word ook wel de …. Genoemd", ""),
("stad der wondereng", "wrong_answer"),
("stad der steden", "wrong_answer"),
("Haven stad", "correct_answer")],
[("In welke provincie ligt Rotterdam?", ""),
("Noord-Holland", "wrong_answer"),
("Zuid-Holland", "correct_answer"),
("Noord-Brabant", "wrong_answer")],
[("Hoe heet de grootste rivier waar Rotterdam aan grenst?", ""),
("De Maas", "correct_answer"),
("De Rijn", "wrong_answer"),
("De Waal", "wrong_answer")],
[("De afstand tussen Rotterdam is ongeveer?", ""),
("50 tot 60km", "wrong_answer"),
("60 tot 70km", "wrong_answer"),
("70 tot 80km", "correct_answer")],
]
random.shuffle(questions)
# call in the questions
if len(questions[0]) == 4:
self.draw_questions("Geography", color.green,
questions[0][0],
questions[0][1],
questions[0][2],
questions[0][3])
else:
self.draw_questions("Geography", color.green,
questions[0][0],
questions[0][1],
questions[0][2],
("", ""))
def draw_entertainment(self, screen):
questions_entertainment_BG = pygame.image.load("./Design_Afbeeldingen/background.png")
screen.blit(questions_entertainment_BG, (0, 0))
questions = [
[("Welke bar in Rotterdam werd in", "2009 de beste bar ter wereld benoemd?"),
("De Witte Aap", "correct_answer"),
("Het NRC", "wrong_answer"),
("Café de Beurs", "wrong_answer")],
[("Hoe heet de bekendste escape room in Rotterdam?", ""),
("R’dam Escape", "wrong_answer"),
("Escape010", "correct_answer"),
("Room Escape", "wrong_answer")],
[("Voor welk vervoermiddel is er", "geen tour door Rotterdam beschikbaar?"),
("Segway", "wrong_answer"),
("Boot", "wrong_answer"),
("Auto", "correct_answer")],
[("Welk van de volgende winkels is niet rond de koopgoot?", ""),
("H&M", "wrong_answer"),
("Media Markt", "correct_answer"),
("The Sting", "wrong_answer")],
[("In welke bioscoop vindt het Wildlife Film Festival plaats?", ""),
("Cinerama", "correct_answer"),
("Pathé de Kuip", "wrong_answer"),
("Pathé Schouwburgplein", "wrong_answer")],
[("Voor welk museum staat het monument", "van Zadkine genaamd “De Verwoest Stad”?"),
("Havenmuseum", "wrong_answer"),
("Mariniersmuseum", "wrong_answer"),
("Maritiem museum", "correct_answer")],
[("Waar geeft de Rotterdam Tours onder andere rondleidingen?", ""),
("De Euromast", "wrong_answer"),
("Museumplein", "wrong_answer"),
("De Markthal", "correct_answer")],
[("Welke van de volgende Pathé bioscopen is niet in Rotterdam?", ""),
("Pathé de Kuip", "wrong_answer"),
("Pathé de Kroon", "correct_answer"),
("Pathé Schouwburgplein", "wrong_answer")],
[("Hoeveel bezoekers zijn er jaarlijks bij de Marathon Rotterdam?", ""),
("925.000 bezoekers", "correct_answer"),
("675.000 bezoekers", "wrong_answer"),
("830.000 bezoekers", "wrong_answer")],
[("Waar kan je niet terecht om te gaan zwemmen?", ""),
("Hoek van Holland", "wrong_answer"),
("Euromast Park", "correct_answer"),
("Plaswijckpark", "wrong_answer")],
[("Welke landen kun je behalve", "Nederland ook in Miniworld Rotterdam zien?"),
("Luxemburg en België", "correct_answer"),
("Duitsland en België", "wrong_answer"),
("Duitsland en Frankrijk", "wrong_answer")],
[("Hoe heet de culturele en culinaire", "ontdekkingstocht door Rotterdam?"),
("Drive & Eat", "wrong_answer"),
("Bicycle Diner", "wrong_answer"),
("Bike & Bite", "correct_answer")],
[("Welk van de volgende restaurantboten", "in Rotterdam bestaat niet?"),
("De Zwanenboot", "correct_answer"),
("De Pannenkoekenboot", "wrong_answer"),
("De Berenboot", "wrong_answer")],
[("Welk van de volgende bioscopen is het oudst?", ""),
("Cinerama", "wrong_answer"),
("Pathé de Kuip", "wrong_answer"),
("LantarenVenster", "correct_answer")],
[("Op welk plein vindt jaarlijkse het Najaarskermis Rotterdam plaats?", ""),
("Mullerpier", "correct_answer"),
("Pier 80", "wrong_answer"),
("Schouwburgplein", "wrong_answer")],
]
random.shuffle(questions)
# call in the questions
if len(questions[0]) == 4:
self.draw_questions("Entertainment", color.red,
questions[0][0],
questions[0][1],
questions[0][2],
questions[0][3])
else:
self.draw_questions("Entertainment", color.red,
questions[0][0],
questions[0][1],
questions[0][2],
("", ""))
def draw_history(self, screen):
questions_history_BG = pygame.image.load("./Design_Afbeeldingen/background.png")
screen.blit(questions_history_BG, (0, 0))
# Put all the questions in place
questions = [
[("Waar dankt Rotterdam zijn naam aan?", ""),
("Kooplieden hadden dit vroeger bedacht", "wrong_answer"),
("Aan de rivier de rotte", "correct_answer"),
("Er was een dam aangelegd in de maas", "wrong_answer")],
[("Wat is het enigste overgebleven", "middeleeuws gebouw in de binnenstad van Rotterdam?"),
("De oude haven", "wrong_answer"),
("VOC magazijn Rotterdam", "wrong_answer"),
("St. Laurenskerk", "correct_answer")],
[("Wie is de nachtburgemeester van Rotterdam?", ""),
("Ahmed Aboutaleb", "wrong_answer"),
("Jules Deelder", "correct_answer"),
("Willem Alexander", "wrong_answer")],
[("Was de eerste metrolijn in Nederland in Rotterdam geopend?", ""),
("Waar, in 1968", "correct_answer"),
("Niet waar", "wrong_answer")],
[("Waar stond vroeger de wijk Katendrecht om bekend?", ""),
("De beste bakker van de stad was daar gevestigd", "wrong_answer"),
("De prostituees", "correct_answer"),
("De oudste beschermde boom van de stad staat daar", "wrong_answer")],
[("Wanneer is diergaarde Blijdorp geopend?", ""),
("1855", "correct_answer"),
("1975", "wrong_answer"),
("1995", "wrong_answer")],
[("Wat is de officiële naam van de koopgoot?", ""),
("De ondergrondse winkelstraat", "wrong_answer"),
("Beurstraverse", "correct_answer"),
("Gewoon de koopgoot", "wrong_answer")],
[("Welk gebouw (gebouwd in 1957)", "stond symbool voor de wederopbouw van de stad?"),
("De Bijenkorf", "correct_answer"),
("De Kubuswoningen", "wrong_answer"),
("The red apple", "wrong_answer")],
[("Rotterdam voor de Tweede Wereldoorlog?", ""),
("ca. 5000", "wrong_answer"),
("ca. 8000", "wrong_answer"),
("ca. 12000", "correct_answer")],
[("Wereldoorlog de enige weg naar het", "centrum die de Duitsers probeerden te bereiken?"),
("De nieuwe Binnenweg", "wrong_answer"),
("Maasbrug", "correct_answer"),
("Koninginnenbrug", "wrong_answer")],
[("Rotterdam was tot 1870 een opslag haven,", "welke producten werden er onder ander opgeslagen?"),
("Suiker", "correct_answer"),
("Wol", "correct_answer"),
("Cacao", "wrong_answer")],
[("Door welke architect(en) is de Euromast ontworpen?", ""),
("Maaskant", "correct_answer"),
("Brinkman en van der Vlugt", "wrong_answer"),
("Koolhaas", "wrong_answer")],
[("In welk jaar heeft Rotterdam stadsrechten gekregen?", ""),
("1250", "wrong_answer"),
("1340 en van der Vlugt", "correct_answer"),
("1590", "wrong_answer")],
[("Hoe heette de haven van Rotterdam", "oorspronkelijk tijdens zijn ontstaan?"),
("Waalhaven", "correct_answer"),
("De Maashaven", "wrong_answer"),
("Europoort", "wrong_answer")],
]
random.shuffle(questions)
# call in the questions
if len(questions[0]) == 4:
self.draw_questions("History", color.yellow,
questions[0][0],
questions[0][1],
questions[0][2],
questions[0][3])
else:
self.draw_questions("History", color.yellow,
questions[0][0],
questions[0][1],
questions[0][2],
("", ""))
def draw_rating(self, correct, screen):
rating_BG = pygame.image.load("./Design_Afbeeldingen/background.png")
screen.blit(rating_BG, (0, 0))
# Answer is correct
rating = "Your answer is "
if correct == 1:
rating += "CORRECT"
color_answer = color.green
correct_img = pygame.image.load("./Design_Afbeeldingen/correct1.png")
screen.blit(correct_img, (0, 0))
# Answer is incorrect
elif correct == 0:
rating += "INCORRECT"
color_answer = color.red
incorrect_img = pygame.image.load("./Design_Afbeeldingen/incorrect.png")
screen.blit(incorrect_img, (0, 0))
question_font = Font.settings(Font.font_skater, 45, rating, color_answer)
# Create button to proceed
if correct == 1:
button_draw("proceed", 318, 263, 144, 72, color.grey, color.green, "diceroll")
proceed_font = Font.settings(Font.font_doctor_bold, 35, "proceed", color.black)
else:
button_draw("proceed", 318, 263, 144, 72, color.grey, color.green, "proceed")
proceed_font = Font.settings(Font.font_doctor_bold, 35, "proceed", color.black)
# blit that shit
screen.blit(question_font, (175, 150))
screen.blit(proceed_font, (342, 279))
class HighScore:
def draw(self, screen):
        # page background
highscores_BG = pygame.image.load("./Design_Afbeeldingen/highscores.png")
screen.blit(highscores_BG, (0, 0))
results = data.select_query("""SELECT name, wins, type AS category, color
FROM players
INNER JOIN categories
ON players.category = categories.id ORDER BY wins DESC LIMIT 5;""", True)
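        # each returned row is (name, wins, category type, color), ordered by
        # wins so the best five scores come first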
# count how many results
how_many = len(results)
# begin position of the results in Y
position_y = 185
# begin font size
font_size = 40
# Loop the results from the database
for i in range(how_many):
player = str(results[i][0])
player_font = Font.settings(None, font_size, player, color.black)
player_score = str(results[i][1])
player_score_font = Font.settings(None, font_size, player_score, color.black)
player_category = str(results[i][2])
player_category_font = Font.settings(None, font_size, player_category, color.black)
screen.blit(player_font, (48, position_y))
screen.blit(player_score_font, (313, position_y))
screen.blit(player_category_font, (480, position_y))
# the new y position for the next result
position_y += 68
            # the lower the position, the smaller the font size
font_size -= 3
# back Button
button("back", 16, 543, 90, 40, color.red, color.green, "main")
# Titles
title = Font.settings(None, 50, "", color.black)
name = Font.settings(None, 35, "", color.black)
scores = Font.settings(None, 35, "", color.black)
category = Font.settings(None, 35, "", color.black)
        # blit that shit
screen.blit(title, (250, 50))
screen.blit(name, (30, 150))
screen.blit(scores, (310, 150))
screen.blit(category, (510, 150))
# class for the game screen
class gamepage:
def __init__(self):
        # self.players = 0 so we can store the player amount in it
        # self.playerschosen = False and becomes True once a number is picked;
        # only then is the start button usable
        # the other values control which buttons are shown once one is pressed:
        # if 2 is pressed, show only 2, etc.
self.players = 0
self.playerschosen = False
self.buttonstwo = True
self.buttonsthree = True
self.buttonsfour = True
def draw(self, screen):
# background playerselection
playerselection = pygame.image.load("./Design_Afbeeldingen/playerselection.png")
screen.blit(playerselection, (0, 0))
# ask how many players
question = Font.settings(None, 30, "", color.white)
# quit button
button("back", 16, 543, 90, 40, color.red, color.green, "main")
quitbuttonfont = Font.settings(None, 30, "", color.white)
# start button
button("start", 694, 543, 90, 40, color.green, color.green, "start")
startbuttonfont = Font.settings(None, 30, "", color.white)
if self.buttonstwo:
# make a button to select how many players you want
# two players
button_draw("two", 139, 267, 160, 65, color.grey, color.green, "two")
twofont = Font.settings(Font.font_dynamix, 36, "2", color.black)
screen.blit(twofont, (202, 263))
if self.buttonsthree:
# three players
button_draw("three", 319, 267, 160, 65, color.grey, color.green, "three")
threefont = Font.settings(Font.font_dynamix, 36, "3", color.black)
screen.blit(threefont, (385, 263))
if self.buttonsfour:
# four players
button_draw("four", 499, 267, 160, 65, color.grey, color.green, "four")
fourfont = Font.settings(Font.font_dynamix, 36, "4", color.black)
screen.blit(fourfont, (560, 263))
# blit the button fonts
screen.blit(quitbuttonfont, (425, 425))
screen.blit(startbuttonfont, (225, 425))
screen.blit(question, (10, 10))
    # if a button is pressed, call one of these methods so self.players is adjusted accordingly
def playerstwo(self):
self.players = 2
def playersthree(self):
self.players = 3
def playersfour(self):
self.players = 4
class startgame:
def __init__(self):
        # draw the buttons only if this value is true. it's true by default,
# set to false while moving
# and set back to true once players are done moving.
self.drawbuttons = True
def draw(self, screen):
# draw the game board
# TODO insert correct image
background = pygame.image.load("./Design_Afbeeldingen/playground.png")
backgroundpos = (0, 0)
background = background.convert()
# blit background before drawing players so players get in front of background
screen.blit(background, backgroundpos)
if self.drawbuttons:
# draw start and next button
# start makes you go to a questionpage
# nextplayer move the currentplayer to next player
button("playground-main", 8, 8, 120, 40, color.red, color.green, "main")
button("savegame", 8, 53, 120, 40, color.red, color.green, None)
button("start", 83, 544, 135, 45, color.grey, color.green, "questions")
button("Next", 583, 544, 135, 45, color.grey, color.green, "nextplayer")
# draw whose turn it is currently
        if turns.currentplayer == player1:
            current = Font.settings(Font.font_skater, 37, "IT'S " + turns.currentplayer.name + "'S TURN!", player1.color)
        elif turns.currentplayer == player2:
            current = Font.settings(Font.font_skater, 37, "IT'S " + turns.currentplayer.name + "'S TURN!", player2.color)
        elif turns.currentplayer == player3:
            current = Font.settings(Font.font_skater, 37, "IT'S " + turns.currentplayer.name + "'S TURN!", player3.color)
        elif turns.currentplayer == player4:
            current = Font.settings(Font.font_skater, 37, "IT'S " + turns.currentplayer.name + "'S TURN!", player4.color)
screen.blit(current, (250, 550))
# draw the players
player1.draw(screen)
player2.draw(screen)
if gamepage.players == 3:
player3.draw(screen)
elif gamepage.players == 4:
player3.draw(screen)
player4.draw(screen)
class turns:
def __init__(self):
# self.n is True by default and set to False once the player list is created,
# so the list won't be rebuilt every iteration.
# currentplayer is None so we can fill in a current player once needed.
# the list is empty so we can insert the player data later on.
self.n = True
self.currentplayer = None
self.list = []
def update(self):
# this is ticked only once
if self.n:
# check for the amount of players, then put them in a list accordingly
if gamepage.players == 4:
self.list = [player1, player2, player3, player4]
elif gamepage.players == 3:
self.list = [player1, player2, player3]
else:
self.list = [player1, player2]
# now shuffle the list to put the players in a random order
random.shuffle(self.list)
self.n = False
# take the current player from the list
if main.listremove < gamepage.players:
self.currentplayer = self.list[main.listremove]
def next(self):
# ticked every time a player presses nextplayer in the game
# if the counter < the amount of players, add 1 to it so we move to next player
if main.listremove < gamepage.players:
main.listremove += 1
# if counter == players, set it back to 0, we now start the player loop over again.
if main.listremove == gamepage.players:
main.listremove = 0
# look up the current player in the list, using the counter as index
self.currentplayer = self.list[main.listremove]
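# For illustration (an aside, not part of the game flow): the rotation above
# is plain modular arithmetic; with 3 players it is equivalent to
#   main.listremove = (main.listremove + 1) % gamepage.players
# i.e. listremove cycles 0 -> 1 -> 2 -> 0 -> ...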
class whostarts:
def __init__(self):
# this is where we will give each player a category.
# create a list with every category and set self.chosen to 0
self.categories = ["Sports", "Geography", "Entertainment", "History"]
self.chosen = 0
def draw(self, screen):
# drawing the background
whostartsimg = pygame.image.load("./Design_Afbeeldingen/background.png")
screen.blit(whostartsimg, (0, 0))
# here we call turns.update, we now have a list with players
turns.update()
# draw the continue button only once all players chose a category.
# draw each category button only while the category is still in the list:
# when a button is pressed, remove the category from the list, store it on
# the player, and stop showing the button for that category.
if self.chosen >= gamepage.players:
button_draw("Next", 692, 543, 90, 40, color.grey, color.green,"asknames")
buttontext = Font.settings(Font.font_orange_juice, 36, "Next", color.black)
screen.blit(buttontext, (700, 547))
if "Sports" in self.categories:
button_draw("Sports", 163, 207, 226, 83, color.grey, color.blue, "Sports")
sports_text = Font.settings(Font.font_skater, 24, "SPORTS", color.blue)
screen.blit(sports_text, (237, 240))
if "Geography" in self.categories:
button_draw("Geography", 410, 207, 226, 83, color.grey, color.green, "Geography")
geography_text = Font.settings(Font.font_skater, 24, "GEOGRAPHY", color.green)
screen.blit(geography_text, (468, 240))
if "Entertainment" in self.categories:
button_draw("Entertainment", 163, 311, 226, 83, color.grey, color.red, "Entertainment")
entertainment_text = Font.settings(Font.font_skater, 24, "ENTERTAINMENT", color.red)
screen.blit(entertainment_text, (199, 345))
if "History" in self.categories:
button_draw("History", 410, 311, 226, 83, color.grey, color.yellow, "History")
history_text = Font.settings(Font.font_skater, 24, "HISTORY", color.yellow)
screen.blit(history_text, (483, 345))
# draw who starts
starter = Font.settings(Font.font_skater, 45, str(turns.currentplayer.name) + "'S TURN!", color.black)
screen.blit(starter, (252, 90))
class diceroll:
def __init__(self):
# self.n = 0 by default; it becomes a randint between 1-6 when update is called
# self.file = None; it becomes the matching dice image when update is called
# self.x = 300: where the dice will be drawn by default
# self.y = 100: where the dice will be drawn by default
# self.showbuttons: show the move buttons only once the dice has been rolled
self.n = 0
self.file = None
self.x = 300
self.y = 100
self.showbuttons = False
def update(self):
# put in a randint, make self.file have corresponding img
print(turns.currentplayer.correct)
self.n = random.randint(1, 6)
print(self.n)
# load the six dice faces and pick the one matching the roll
faces = [pygame.image.load("./Game_Dice/%d.png" % i) for i in range(1, 7)]
self.file = faces[self.n - 1]
def draw(self, screen):
# draw the background
diceroll_BG = pygame.image.load("./Design_Afbeeldingen/diceroll.png")
screen.blit(diceroll_BG, (0, 0))
# draw the dice
button("roll dice", 620, 98, 102, 64, color.blue, color.green, "update")
screen.blit(self.file, (self.x, self.y))
# if diced, show the buttons
if self.showbuttons:
button("quit", 628, 523, 90, 40, color.white, color.green, "proceed")
button("up", 628, 235, 90, 40, color.white, color.green, "up")
button("down", 628, 325, 90, 40, color.white, color.green, "down")
button("left", 580, 280, 90, 40, color.white, color.green, "left")
button("right", 675, 280, 90, 40, color.white, color.green, "right")
# pages for the dice animations.
# main.diced counts the amount of wall collisions.
# if main.diced == 5 we set the page back to the dicepage, reset main.diced
# and show the buttons to move.
# every time the dice hits a wall it bounces off in the mirrored direction,
# like a ball would.
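# For reference (derived from the page dispatch in main.draw): the bounce
# pages map as 21 -> moverightdown, 22 -> moverightup, 23 -> moveleftdown,
# 24 -> moveleftup. So hitting the top wall while moving right+up (y < 40)
# switches to page 21: the x-direction is kept and the y-direction mirrored.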
def moverightup(self, screen):
diceroll_move_BG = pygame.image.load("./Design_Afbeeldingen/diceroll.png")
screen.blit(diceroll_move_BG, (0, 0))
print(main.diced)
if main.diced < 5:
screen.blit(self.file, (self.x, self.y))
self.y -= 4
self.x += 4
if self.x % 100 == 0:
diceroll.update()
time.sleep(0.001)
if self.y < 40:
page.pagenumber = 21
main.diced += 1
elif self.x > 419:
page.pagenumber = 24
main.diced += 1
else:
page.pagenumber = 12
main.diced = 0
self.showbuttons = True
print(page.pagenumber)
def moverightdown(self, screen):
diceroll_move1_BG = pygame.image.load("./Design_Afbeeldingen/diceroll.png")
screen.blit(diceroll_move1_BG, (0, 0))
print(main.diced)
if main.diced < 5:
screen.blit(self.file, (self.x, self.y))
self.y += 4
self.x += 4
if self.x % 100 == 0:
diceroll.update()
time.sleep(0.001)
if self.y > 428:
page.pagenumber = 22
main.diced += 1
elif self.x > 419:
page.pagenumber = 23
main.diced += 1
else:
page.pagenumber = 12
main.diced = 0
self.showbuttons = True
print(page.pagenumber)
def moveleftup(self, screen):
diceroll_move2_BG = pygame.image.load("./Design_Afbeeldingen/diceroll.png")
screen.blit(diceroll_move2_BG, (0, 0))
print(main.diced)
if main.diced < 5:
screen.blit(self.file, (self.x, self.y))
self.y -= 4
self.x -= 4
if self.x % 100 == 0:
diceroll.update()
time.sleep(0.001)
if self.y < 40:
page.pagenumber = 23
main.diced += 1
elif self.x < 30:
page.pagenumber = 22
main.diced += 1
else:
page.pagenumber = 12
main.diced = 0
self.showbuttons = True
print(page.pagenumber)
def moveleftdown(self, screen):
diceroll_move3_BG = pygame.image.load("./Design_Afbeeldingen/diceroll.png")
screen.blit(diceroll_move3_BG, (0, 0))
print(main.diced)
if main.diced < 5:
screen.blit(self.file, (self.x, self.y))
self.y += 4
self.x -= 4
if self.x % 100 == 0:
diceroll.update()
time.sleep(0.001)
if self.y > 428:
page.pagenumber = 24
main.diced += 1
elif self.x < 30:
page.pagenumber = 21
main.diced += 1
else:
page.pagenumber = 12
main.diced = 0
self.showbuttons = True
print(page.pagenumber)
class diceroll2:
def __init__(self):
# this is the same as diceroll, minus the animation: the page is loaded and
# instantly updates, giving a random dice number that is printed on screen.
# the user can click on a direction and will move there.
# the moving user is self.movingplayer. it is set in player.check():
# when a collision is detected there, the collided-with player is stored
# in this value and this page is called.
self.n = 0
self.file = None
self.movingplayer = None
def update(self):
turns.currentplayer.correct = False
print(turns.currentplayer.correct)
diceroll.n = random.randint(1, 6)
print(diceroll.n)
# load the six dice faces and pick the one matching the roll
faces = [pygame.image.load("./Game_Dice/%d.png" % i) for i in range(1, 7)]
self.file = faces[diceroll.n - 1]
def draw(self, screen):
diceroll_BG = pygame.image.load("./Design_Afbeeldingen/diceroll.png")
screen.blit(diceroll_BG, (0, 0))
dice = Font.settings(None, 50, self.movingplayer.name + " rolled:", color.white)
screen.blit(dice, (10, 10))
screen.blit(self.file, (15, 60))
button("quit", 628, 523, 90, 40, color.white, color.green, "proceed2")
button("up", 628, 235, 90, 40, color.white, color.green, "up2")
button("down", 628, 325, 90, 40, color.white, color.green, "down2")
button("left", 580, 280, 90, 40, color.white, color.green, "left2")
button("right", 675, 280, 90, 40, color.white, color.green, "right2")
class endscreen:
def __init__(self):
# we go to this page once a player has reached the top.
# the winner's name and category are inserted here
self.winner_name = None
self.winner_category = None
def update(self):
pass
def draw(self, screen):
endscreen_BG = pygame.image.load("./Design_Afbeeldingen/endscreen.png")
screen.blit(endscreen_BG, (0, 0))
# print the name and category and quit button
self.winner_name = Font.settings(Font.font_doctor_light, 45, "Player: " + turns.currentplayer.name, turns.currentplayer.color)
self.winner_category = Font.settings(Font.font_doctor_light, 45, "Category: " + turns.currentplayer.category, turns.currentplayer.color)
button("quit", 16, 543, 90, 40, color.green, color.green, "quit")
screen.blit(self.winner_name, (182, 217))
screen.blit(self.winner_category, (182, 265))
# add a point to the winning player
data.player_win()
class settings:
def __init__(self):
pass
def update(self):
pass
def draw(self, screen):
background_img = pygame.image.load("./Design_Afbeeldingen/settings.png")
# blit background before buttons
screen.blit(background_img, (0, 0))
# draw buttons
# if the test button is pressed, play a sound when sound is on, otherwise don't
button("soundon", 335, 201, 130, 60, color.grey, color.green, "sound0")
button("soundoff", 335, 271, 130, 60, color.grey, color.red, "sound1")
button("soundtest", 317, 361, 170, 112, color.grey, color.black, "soundtest")
button("back", 16, 543, 90, 40, color.red, color.green, "main")
string = Font.settings(None, 36, Input.string, color.black)
screen.blit(string, (10, 10))
class names:
def __init__(self):
self.n = 0
def update(self):
pass
def draw(self, screen):
# you can type while this page is displayed. the typed string is
# printed immediately. when the changeplayer button is pressed, the current
# string is stored in currentplayer.name, the string is cleared, and the
# current player is set to the next player.
#page background
playername_BG = pygame.image.load("./Design_Afbeeldingen/playername1.png")
screen.blit(playername_BG, (0, 0))
button("back", 16, 543, 90, 40, color.red, color.green, "start")
if self.n == gamepage.players:
button("next", 692, 543, 90, 40, color.grey, color.green, "whostarts")
button("Save player", 297, 336, 217, 38, color.blue, color.green, "changeplayer")
string = Font.settings(None, 36, Input.string, color.black)
screen.blit(string, (281, 286))
currentPlayer_CY = Font.settings(Font.font_skater, 45, str(turns.currentplayer.name) + "'S TURN!", color.black)
screen.blit(currentPlayer_CY, (252, 90))
# this is the main loop that updates and draws the entities
class main:
# Set up screen
width = 800
height = 600
size = (width, height)
screen = pygame.display.set_mode(size)
pygame.display.set_icon(pygame.image.load("./Design_Afbeeldingen/background.png"))
pygame.display.set_caption("NulTien")
# Stop the game loop(temporary)
stop_loop = False
# create a counter for turns
listremove = 0
# extra is False, set to True for an extra dice roll (on collision)
# and set back to False when the player has moved.
extra = False
# counter for dice animation
diced = 0
# set up tickrate
clock = pygame.time.Clock()
winner_name = None
winner_category = None
winner_color = None
# If a player has won
player_won = False
# sound is on by default, you change this value in the settings page.
sound = True
# create a list of keys (one entry is the key constant, the other what goes in the string when pressed)
keylist = [(pygame.K_a, "a"), (pygame.K_b, "b"), (pygame.K_c, "c"), (pygame.K_d, "d"), (pygame.K_e, "e"),
(pygame.K_f, "f"), (pygame.K_g, "g"), (pygame.K_h, "h"), (pygame.K_i, "i"), (pygame.K_j, "j"),
(pygame.K_k, "k"), (pygame.K_l, "l"), (pygame.K_m, "m"), (pygame.K_n, "n"), (pygame.K_o, "o"),
(pygame.K_p, "p"), (pygame.K_q, "q"), (pygame.K_r, "r"), (pygame.K_s, "s"), (pygame.K_t, "t"),
(pygame.K_u, "u"), (pygame.K_v, "v"), (pygame.K_w, "w"), (pygame.K_x, "x"), (pygame.K_y, "y"),
(pygame.K_z, "z")]
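# Note (an untested alternative, not used here): pygame KEYDOWN events also
# carry event.unicode, which would make this lookup table unnecessary, e.g.
#   if event.unicode.isalpha(): Input.string += event.unicode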
lengthlist = len(keylist)
def __init__(self, screen, size):
self.size = size
self.screen = screen
# Start PyGame
pygame.init()
# Set up the default font
self.font = pygame.font.Font(None, 30)
# Draw everything
def draw(self):
# Clear the screen
self.screen.fill((0, 0, 0))
# Draw the scenes
# if certain page number: draw corresponding page
if page.pagenumber == 0:
mainpage.draw(self.screen)
elif page.pagenumber == 2:
instructionpage.draw(self.screen)
elif page.pagenumber == 4:
questionpage.draw_category(self.screen)
elif page.pagenumber == 5:
main.clock.tick(15)
questionpage.draw_sports(self.screen)
elif page.pagenumber == 13:
main.clock.tick(15)
questionpage.draw_entertainment(self.screen)
elif page.pagenumber == 14:
main.clock.tick(15)
questionpage.draw_history(self.screen)
elif page.pagenumber == 15:
main.clock.tick(15)
questionpage.draw_geography(self.screen)
# If answer is correct
elif page.pagenumber == 6:
questionpage.draw_rating(1, self.screen)
# If answer is incorrect
elif page.pagenumber == 7:
questionpage.draw_rating(0, self.screen)
elif page.pagenumber == 1:
gamepage.draw(self.screen)
elif page.pagenumber == 3:
startgame.draw(self.screen)
elif page.pagenumber == 11:
whostarts.draw(self.screen)
elif page.pagenumber == 12:
diceroll.draw(self.screen)
elif page.pagenumber == 16:
diceroll2.draw(self.screen)
elif page.pagenumber == 21:
diceroll.moverightdown(self.screen)
elif page.pagenumber == 22:
diceroll.moverightup(self.screen)
elif page.pagenumber == 23:
diceroll.moveleftdown(self.screen)
elif page.pagenumber == 24:
diceroll.moveleftup(self.screen)
elif page.pagenumber == 30:
endscreen.draw(self.screen)
elif page.pagenumber == 31:
settings.draw(self.screen)
elif page.pagenumber == 32:
names.draw(self.screen)
elif page.pagenumber == 40:
score.draw(self.screen)
# Flip the screen
pygame.display.flip()
if page.pagenumber not in (5, 13, 14, 15):
main.clock.tick(60)
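# Note (an untested alternative, not used here): the page dispatch above
# could also be written as a dict lookup, e.g.
#   pages = {0: mainpage.draw, 2: instructionpage.draw, 3: startgame.draw, ...}
#   pages[page.pagenumber](self.screen)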
# The game loop
def game_loop(self):
while not process_events():
self.draw()
# this function processes events and reports whether to quit the game
def process_events():
for event in pygame.event.get():
if event.type == pygame.QUIT:
# give the signal to quit
return True
if page.pagenumber == 32:
if event.type == pygame.KEYDOWN:
for i in range(main.lengthlist):
if event.key == main.keylist[i][0]:
if Input.characters < 9:
Input.string += main.keylist[i][1]
Input.characters += 1
if event.key == pygame.K_BACKSPACE:
Input.string = Input.string[:-1]
Input.characters -= 1
return False
# this is where we make an instance of the main program and run the loop
def program():
game = main(main.screen, main.size)
game.game_loop()
Input = Input()
turns = turns()
questionpage = Questionpage(main.screen)
# make instances of classes
instructionpage = instructionpage()
mainpage = mainpage()
gamepage = gamepage()
diceroll = diceroll()
diceroll2 = diceroll2()
endscreen = endscreen()
settings = settings()
color = color()
startgame = startgame()
whostarts = whostarts()
names = names()
data = Database()
score = HighScore()
# create page and set default pagenumber
page = page(0)
# create the players
player1 = player("PLAYER1", color.blue, 15, 280, 0, 0, 1)
print(player1)
player2 = player("PLAYER2", color.yellow, 15, 321, 10, 0, 2)
print(player2)
player3 = player("PLAYER3", color.green, 15, 362, 20, 0, 3)
print(player3)
player4 = player("PLAYER4", color.red, 15, 401, 30, 0, 4)
print(player4)
program()
|
Kaana38/INF1C---Project-2---groep-3
|
game.py
|
Python
|
mit
| 106,624
|
[
"Elk"
] |
d4157a040469a99a192baf8cb9f993dffcaaaaeb876b7ef7f13cabd73d85ba15
|
from gi.repository import Gtk, Gdk, GObject
import logging
import os
import xapian
from gettext import gettext as _
from cellrenderers import (CellRendererAppView,
CellButtonRenderer,
CellButtonIDs)
from softwarecenter.ui.gtk3.em import em, StockEms
from softwarecenter.enums import (AppActions, NonAppVisibility, Icons)
from softwarecenter.utils import ExecutionTime
from softwarecenter.backend import get_install_backend
from softwarecenter.netstatus import (get_network_watcher,
network_state_is_connected)
from softwarecenter.ui.gtk3.models.appstore2 import (AppGenericStore,
CategoryRowReference)
class AppTreeView(Gtk.TreeView):
"""Treeview based view component that takes a AppStore and displays it"""
VARIANT_INFO = 0
VARIANT_REMOVE = 1
VARIANT_INSTALL = 2
ACTION_BTNS = (VARIANT_REMOVE, VARIANT_INSTALL)
def __init__(self, app_view, icons, show_ratings, store=None):
Gtk.TreeView.__init__(self)
self._logger = logging.getLogger("softwarecenter.view.appview")
self.app_view = app_view
self.pressed = False
self.focal_btn = None
self._action_block_list = []
self.expanded_path = None
#~ # if this hacked mode is available everything will be fast
#~ # and we can set fixed_height mode and still have growing rows
#~ # (see upstream gnome #607447)
try:
self.set_property("ubuntu-almost-fixed-height-mode", True)
self.set_fixed_height_mode(True)
except:
self._logger.warn("ubuntu-almost-fixed-height-mode extension not available")
self.set_headers_visible(False)
# a11y: this is a cell renderer that only displays a icon, but still
# has a markup property for orca and friends
# we use it so that orca and other a11y tools get proper text to read
# it needs to be the first one, because that is what the tools look
# at by default
tr = CellRendererAppView(icons,
show_ratings,
Icons.INSTALLED_OVERLAY)
tr.set_pixbuf_width(32)
tr.set_button_spacing(em(0.3))
# create buttons and set initial strings
info = CellButtonRenderer(self,
name=CellButtonIDs.INFO)
info.set_markup_variants(
{self.VARIANT_INFO: _('More Info')})
action = CellButtonRenderer(self,
name=CellButtonIDs.ACTION)
action.set_markup_variants(
{self.VARIANT_INSTALL: _('Install'),
self.VARIANT_REMOVE: _('Remove')})
tr.button_pack_start(info)
tr.button_pack_end(action)
column = Gtk.TreeViewColumn("Applications", tr,
application=AppGenericStore.COL_ROW_DATA)
column.set_cell_data_func(tr, self._cell_data_func_cb)
column.set_fixed_width(200)
column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
self.append_column(column)
# network status watcher
watcher = get_network_watcher()
watcher.connect("changed", self._on_net_state_changed, tr)
# custom cursor
self._cursor_hand = Gdk.Cursor.new(Gdk.CursorType.HAND2)
self.connect("style-updated", self._on_style_updated, tr)
# button and motion are "special"
self.connect("button-press-event", self._on_button_press_event, tr)
self.connect("button-release-event", self._on_button_release_event, tr)
self.connect("key-press-event", self._on_key_press_event, tr)
self.connect("key-release-event", self._on_key_release_event, tr)
self.connect("motion-notify-event", self._on_motion, tr)
self.connect("cursor-changed", self._on_cursor_changed, tr)
# our own "activate" handler
self.connect("row-activated", self._on_row_activated, tr)
self.backend = get_install_backend()
self._transactions_connected = False
self.connect('realize', self._on_realize, tr)
@property
def appmodel(self):
model = self.get_model()
if isinstance(model, Gtk.TreeModelFilter):
return model.get_model()
return model
def clear_model(self):
vadjustment = self.get_scrolled_window_vadjustment()
if vadjustment:
vadjustment.set_value(0)
self.expanded_path = None
if self.appmodel:
self.appmodel.clear()
def expand_path(self, path):
if path is not None and not isinstance(path, Gtk.TreePath):
raise TypeError("Expects Gtk.TreePath or None, got %s" % type(path))
model = self.get_model()
old = self.expanded_path
self.expanded_path = path
if old is not None:
try:
# lazy solution to Bug #846204
model.row_changed(old, model.get_iter(old))
except:
msg = "apptreeview.expand_path: Supplied 'old' path is an invalid tree path: '%s'" % old
logging.debug(msg)
if path is None: return
model.row_changed(path, model.get_iter(path))
return
# def is_action_in_progress_for_selected_app(self):
# """
# return True if an install or remove of the current package
# is in progress
# """
# (path, column) = self.get_cursor()
# if path:
# model = self.get_model()
# return (model[path][AppGenericStore.COL_ROW_DATA].transaction_progress != -1)
# return False
def get_scrolled_window_vadjustment(self):
ancestor = self.get_ancestor(Gtk.ScrolledWindow)
if ancestor:
return ancestor.get_vadjustment()
return None
def get_rowref(self, model, path):
if path is None: return None
return model[path][AppGenericStore.COL_ROW_DATA]
def rowref_is_category(self, rowref):
return isinstance(rowref, CategoryRowReference)
def _on_realize(self, widget, tr):
# connect to backend events once self is realized so handlers
# have access to the TreeView's initialised Gdk.Window
if self._transactions_connected: return
self.backend.connect("transaction-started", self._on_transaction_started, tr)
self.backend.connect("transaction-finished", self._on_transaction_finished, tr)
self.backend.connect("transaction-stopped", self._on_transaction_stopped, tr)
self._transactions_connected = True
return
def _calc_row_heights(self, tr):
ypad = StockEms.SMALL
tr.set_property('xpad', StockEms.MEDIUM)
tr.set_property('ypad', ypad)
for btn in tr.get_buttons():
# recalc button geometry and cache
btn.configure_geometry(self.create_pango_layout(""))
btn_h = btn.height
tr.normal_height = max(32 + 2*ypad, em(2.5) + ypad)
tr.selected_height = tr.normal_height + btn_h + StockEms.MEDIUM
return
def _on_style_updated(self, widget, tr):
self._calc_row_heights(tr)
return
def _on_motion(self, tree, event, tr):
window = self.get_window()
x, y = int(event.x), int(event.y)
if not self._xy_is_over_focal_row(x, y):
window.set_cursor(None)
return
path = tree.get_path_at_pos(x, y)
if not path:
window.set_cursor(None)
return
rowref = self.get_rowref(tree.get_model(), path[0])
if not rowref: return
if self.rowref_is_category(rowref):
window.set_cursor(None)
return
model = tree.get_model()
app = model[path[0]][AppGenericStore.COL_ROW_DATA]
if (not network_state_is_connected() and
not self.appmodel.is_installed(app)):
action_btn = tr.get_button_by_name(CellButtonIDs.ACTION)
action_btn.set_sensitive(False)
use_hand = False
for btn in tr.get_buttons():
if btn.state == Gtk.StateFlags.INSENSITIVE:
continue
if btn.point_in(x, y):
use_hand = True
if self.focal_btn is btn:
btn.set_state(Gtk.StateFlags.ACTIVE)
elif not self.pressed:
btn.set_state(Gtk.StateFlags.PRELIGHT)
else:
if btn.state != Gtk.StateFlags.NORMAL:
btn.set_state(Gtk.StateFlags.NORMAL)
if use_hand:
window.set_cursor(self._cursor_hand)
else:
window.set_cursor(None)
return
def _on_cursor_changed(self, view, tr):
model = view.get_model()
sel = view.get_selection()
path = view.get_cursor()[0]
rowref = self.get_rowref(model, path)
if not rowref: return
if self.has_focus(): self.grab_focus()
if self.rowref_is_category(rowref):
self.expand_path(None)
return
sel.select_path(path)
self._update_selected_row(view, tr, path)
return
def _update_selected_row(self, view, tr, path=None):
sel = view.get_selection()
if not sel:
return False
model, rows = sel.get_selected_rows()
if not rows:
return False
row = rows[0]
if self.rowref_is_category(row):
return False
# update active app, use row-ref as argument
self.expand_path(row)
app = model[row][AppGenericStore.COL_ROW_DATA]
# make sure this is not a category (LP: #848085)
if self.rowref_is_category(app):
return False
action_btn = tr.get_button_by_name(
CellButtonIDs.ACTION)
#if not action_btn: return False
if self.appmodel.is_installed(app):
action_btn.set_variant(self.VARIANT_REMOVE)
action_btn.set_sensitive(True)
action_btn.show()
elif self.appmodel.is_available(app):
action_btn.set_variant(self.VARIANT_INSTALL)
action_btn.set_sensitive(True)
action_btn.show()
if not network_state_is_connected():
action_btn.set_sensitive(False)
self.app_view.emit("application-selected",
self.appmodel.get_application(app))
return
else:
action_btn.set_sensitive(False)
action_btn.hide()
self.app_view.emit("application-selected",
self.appmodel.get_application(app))
return
if self.appmodel.get_transaction_progress(app) > 0:
action_btn.set_sensitive(False)
elif self.pressed and self.focal_btn == action_btn:
action_btn.set_state(Gtk.StateFlags.ACTIVE)
else:
action_btn.set_state(Gtk.StateFlags.NORMAL)
#~ self.emit("application-selected", self.appmodel.get_application(app))
self.app_view.emit("application-selected", self.appmodel.get_application(app))
return False
def _on_row_activated(self, view, path, column, tr):
rowref = self.get_rowref(view.get_model(), path)
if not rowref: return
if self.rowref_is_category(rowref): return
x, y = self.get_pointer()
for btn in tr.get_buttons():
if btn.point_in(x, y):
return
self.app_view.emit("application-activated", self.appmodel.get_application(rowref))
return
def _on_button_event_get_path(self, view, event):
if event.button != 1: return False
res = view.get_path_at_pos(int(event.x), int(event.y))
if not res: return False
# check the path is valid and is not a category row
path = res[0]
is_cat = self.rowref_is_category(self.get_rowref(view.get_model(), path))
if path is None or is_cat: return False
# only act when the selection is already there
selection = view.get_selection()
if not selection.path_is_selected(path): return False
return path
def _on_button_press_event(self, view, event, tr):
if not self._on_button_event_get_path(view, event): return
self.pressed = True
x, y = int(event.x), int(event.y)
for btn in tr.get_buttons():
if btn.point_in(x, y) and (btn.state != Gtk.StateFlags.INSENSITIVE):
self.focal_btn = btn
btn.set_state(Gtk.StateFlags.ACTIVE)
view.queue_draw()
return
self.focal_btn = None
return
def _on_button_release_event(self, view, event, tr):
path = self._on_button_event_get_path(view, event)
if not path: return
self.pressed = False
x, y = int(event.x), int(event.y)
for btn in tr.get_buttons():
if btn.point_in(x, y) and (btn.state != Gtk.StateFlags.INSENSITIVE):
btn.set_state(Gtk.StateFlags.NORMAL)
self.get_window().set_cursor(self._cursor_hand)
if self.focal_btn is not btn:
break
self._init_activated(btn, view.get_model(), path)
view.queue_draw()
break
self.focal_btn = None
return
def _on_key_press_event(self, widget, event, tr):
kv = event.keyval
#print kv
r = False
if kv == Gdk.KEY_Right: # right-key
btn = tr.get_button_by_name(CellButtonIDs.ACTION)
if btn is None: return # Bug #846779
if btn.state != Gtk.StateFlags.INSENSITIVE:
btn.has_focus = True
btn = tr.get_button_by_name(CellButtonIDs.INFO)
btn.has_focus = False
elif kv == Gdk.KEY_Left: # left-key
btn = tr.get_button_by_name(CellButtonIDs.ACTION)
if btn is None: return # Bug #846779
btn.has_focus = False
btn = tr.get_button_by_name(CellButtonIDs.INFO)
btn.has_focus = True
elif kv == Gdk.KEY_space: # spacebar
for btn in tr.get_buttons():
if (btn is not None and btn.has_focus and
btn.state != Gtk.StateFlags.INSENSITIVE):
btn.set_state(Gtk.StateFlags.ACTIVE)
sel = self.get_selection()
model, it = sel.get_selected()
path = model.get_path(it)
if path:
#self._init_activated(btn, self.get_model(), path)
r = True
break
self.queue_draw()
return r
def _on_key_release_event(self, widget, event, tr):
kv = event.keyval
r = False
if kv == 32: # spacebar
for btn in tr.get_buttons():
if btn.has_focus and btn.state != Gtk.StateFlags.INSENSITIVE:
btn.set_state(Gtk.StateFlags.NORMAL)
sel = self.get_selection()
model, it = sel.get_selected()
path = model.get_path(it)
if path:
self._init_activated(btn, self.get_model(), path)
btn.has_focus = False
r = True
break
self.queue_draw()
return r
def _init_activated(self, btn, model, path):
app = model[path][AppGenericStore.COL_ROW_DATA]
s = Gtk.Settings.get_default()
GObject.timeout_add(s.get_property("gtk-timeout-initial"),
self._app_activated_cb,
btn,
btn.name,
app,
model,
path)
return
def _cell_data_func_cb(self, col, cell, model, it, user_data):
path = model.get_path(it)
if model[path][0] is None:
indices = path.get_indices()
model.load_range(indices, 5)
is_active = path == self.expanded_path
cell.set_property('isactive', is_active)
return
def _app_activated_cb(self, btn, btn_id, app, store, path):
if self.rowref_is_category(app):
return
# FIXME: would be nice if that would be more elegant
# because we use a treefilter we need to get the "real"
# model first
if type(store) is Gtk.TreeModelFilter:
store = store.get_model()
pkgname = self.appmodel.get_pkgname(app)
if btn_id == CellButtonIDs.INFO:
self.app_view.emit("application-activated", self.appmodel.get_application(app))
elif btn_id == CellButtonIDs.ACTION:
btn.set_sensitive(False)
store.row_changed(path, store.get_iter(path))
# be sure we dont request an action for a pkg with pre-existing actions
if pkgname in self._action_block_list:
logging.debug("Action already in progress for package: '%s'" % pkgname)
return False
self._action_block_list.append(pkgname)
if self.appmodel.is_installed(app):
perform_action = AppActions.REMOVE
else:
perform_action = AppActions.INSTALL
store.notify_action_request(app, path)
self.app_view.emit("application-request-action",
self.appmodel.get_application(app),
[], [], perform_action)
return False
def _set_cursor(self, btn, cursor):
# make sure we have a window instance (LP: #617004)
window = self.get_window()
if isinstance(window, Gdk.Window):
x, y = self.get_pointer()
if btn.point_in(x, y):
window.set_cursor(cursor)
def _on_transaction_started(self, backend, pkgname, appname, trans_id, trans_type, tr):
""" callback when an application install/remove transaction has started """
action_btn = tr.get_button_by_name(CellButtonIDs.ACTION)
if action_btn:
action_btn.set_sensitive(False)
self._set_cursor(action_btn, None)
def _on_transaction_finished(self, backend, result, tr):
""" callback when an application install/remove transaction has finished """
# need to send a cursor-changed so the row button is properly updated
self.emit("cursor-changed")
# remove pkg from the block list
self._check_remove_pkg_from_blocklist(result.pkgname)
action_btn = tr.get_button_by_name(CellButtonIDs.ACTION)
if action_btn:
action_btn.set_sensitive(True)
self._set_cursor(action_btn, self._cursor_hand)
def _on_transaction_stopped(self, backend, result, tr):
""" callback when an application install/remove transaction has stopped """
# remove pkg from the block list
self._check_remove_pkg_from_blocklist(result.pkgname)
action_btn = tr.get_button_by_name(CellButtonIDs.ACTION)
if action_btn:
# this should be a function that decides action button state label...
if action_btn.current_variant == self.VARIANT_INSTALL:
action_btn.set_markup(self.VARIANT_REMOVE)
action_btn.set_sensitive(True)
self._set_cursor(action_btn, self._cursor_hand)
def _on_net_state_changed(self, watcher, state, tr):
self._update_selected_row(self, tr)
# queue a draw just to be sure the view is looking right
self.queue_draw()
return
def _check_remove_pkg_from_blocklist(self, pkgname):
if pkgname in self._action_block_list:
i = self._action_block_list.index(pkgname)
del self._action_block_list[i]
def _xy_is_over_focal_row(self, x, y):
res = self.get_path_at_pos(x, y)
#cur = self.get_cursor()
if not res:
return False
return self.get_path_at_pos(x, y)[0] == self.get_cursor()[0]
def get_query_from_search_entry(search_term):
if not search_term:
return xapian.Query("")
parser = xapian.QueryParser()
user_query = parser.parse_query(search_term)
return user_query
def on_entry_changed(widget, data):
def _work():
new_text = widget.get_text()
(view, enquirer) = data
with ExecutionTime("total time"):
with ExecutionTime("enquire.set_query()"):
enquirer.set_query(get_query_from_search_entry(new_text),
limit=100*1000,
nonapps_visible=NonAppVisibility.ALWAYS_VISIBLE)
store = view.tree_view.get_model()
with ExecutionTime("store.clear()"):
store.clear()
with ExecutionTime("store.set_documents()"):
store.set_from_matches(enquirer.matches)
with ExecutionTime("model settle (size=%s)" % len(store)):
while Gtk.events_pending():
Gtk.main_iteration()
return
if widget.stamp: GObject.source_remove(widget.stamp)
widget.stamp = GObject.timeout_add(250, _work)
def get_test_window():
import softwarecenter.log
softwarecenter.log.root.setLevel(level=logging.DEBUG)
softwarecenter.log.add_filters_from_string("performance")
fmt = logging.Formatter("%(name)s - %(message)s", None)
softwarecenter.log.handler.setFormatter(fmt)
from softwarecenter.paths import XAPIAN_BASE_PATH
xapian_base_path = XAPIAN_BASE_PATH
pathname = os.path.join(xapian_base_path, "xapian")
# the store
from softwarecenter.db.pkginfo import get_pkg_info
cache = get_pkg_info()
cache.open()
# the db
from softwarecenter.db.database import StoreDatabase
db = StoreDatabase(pathname, cache)
db.open()
# additional icons come from app-install-data
icons = Gtk.IconTheme.get_default()
icons.prepend_search_path("/usr/share/app-install/icons/")
icons.prepend_search_path("/usr/share/software-center/icons/")
# create a filter
from softwarecenter.db.appfilter import AppFilter
filter = AppFilter(db, cache)
filter.set_supported_only(False)
filter.set_installed_only(True)
# appview
from softwarecenter.ui.gtk3.models.appstore2 import AppListStore
from softwarecenter.db.enquire import AppEnquire
enquirer = AppEnquire(cache, db)
store = AppListStore(db, cache, icons)
from softwarecenter.ui.gtk3.views.appview import AppView
view = AppView(db, cache, icons, show_ratings=True)
view.set_model(store)
entry = Gtk.Entry()
entry.stamp = 0
entry.connect("changed", on_entry_changed, (view, enquirer))
entry.set_text("gtk3")
scroll = Gtk.ScrolledWindow()
box = Gtk.VBox()
box.pack_start(entry, False, True, 0)
box.pack_start(scroll, True, True, 0)
win = Gtk.Window()
win.connect("destroy", lambda x: Gtk.main_quit())
scroll.add(view)
win.add(box)
win.set_size_request(600, 400)
win.show_all()
return win
if __name__ == "__main__":
win = get_test_window()
Gtk.main()
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/software-center/softwarecenter/ui/gtk3/widgets/apptreeview.py
|
Python
|
gpl-3.0
| 23,470
|
[
"ORCA"
] |
b09a21320abb952a81a55dbcdf0522ddf4e50388815a4d83b0eb8db458516c04
|
"""
Surrogate analysis
------------------
This example shows how to estimate a significance threshold in a comodulogram.
A comodulogram shows the estimated PAC metric on a grid of frequency bands.
In the absence of PAC, a PAC metric will return values close to zero, but not
exactly zero. To estimate if a value is significantly far from zero, we use a
surrogate analysis.
In a surrogate analysis, we recompute the comodulogram many times, adding each
time a random time shift to remove any possible coupling between the
components. Note that these time shifts have to be far from zero to effectively
remove a potential coupling. These comodulograms give us an estimation of the
fluctuation of the metric in the absence of PAC.
To derive a significance level from the list of comodulograms, we discuss here
two different methods:
- Computing a z-score for each frequency pair, and thresholding at z = 4.
- Computing a threshold at a given p-value, over a distribution of comodulogram
maxima.
"""
import numpy as np
import matplotlib.pyplot as plt
from pactools import Comodulogram
from pactools import simulate_pac
###############################################################################
# Let's first create an artificial signal with PAC.
fs = 200. # Hz
high_fq = 50.0 # Hz
low_fq = 5.0 # Hz
low_fq_width = 1.0 # Hz
n_points = 1000
noise_level = 0.4
signal = simulate_pac(n_points=n_points, fs=fs, high_fq=high_fq, low_fq=low_fq,
low_fq_width=low_fq_width, noise_level=noise_level,
random_state=0)
###############################################################################
# Then, let's define the range of low frequency, and the PAC metric used.
low_fq_range = np.linspace(1, 10, 50)
method = 'duprelatour' # or 'tort', 'ozkurt', 'penny', 'colgin', ...
###############################################################################
# We also choose the number of comodulograms computed in the surrogate
# analysis. A good rule of thumb is 10 / p_value. Example: 10 / 0.05 = 200.
n_surrogates = 200
###############################################################################
# As a surrogate analysis requires computing many comodulograms, the
# computation can be slow. If you have multiple cores in your CPU, you can
# leverage them using the parameter `n_jobs` > 1.
n_jobs = 1
###############################################################################
# To compute the comodulogram, we need to instantiate a `Comodulogram` object,
# then call the method `fit`. Adding the surrogate analysis is as simple as
# adding the `n_surrogates` parameter.
estimator = Comodulogram(fs=fs, low_fq_range=low_fq_range,
low_fq_width=low_fq_width, method=method,
n_surrogates=n_surrogates, progress_bar=True,
n_jobs=n_jobs)
estimator.fit(signal)
###############################################################################
# Then we plot the significance level on top of the comodulogram.
# Here we present two methods.
#
# The z-score method presented here considers each frequency pair of the
# comodulogram independently. For each pair, we compute the mean `mu` and
# standard deviation `sigma` of the PAC values computed over the surrogate
# signals. We then transform the original PAC values `PAC` (non time-shifted)
# into z-scores `Z`: Z = (PAC - mu) / sigma
#
# This procedure, used for example in [Canolty et al, 2006], suffers from
# multiple-testing issues, and also assumes that the distribution of PAC values
# is Gaussian.
#
#
# The other method presented here considers the distribution of comodulogram
# maxima. For each surrogate comodulogram, we compute the maximum PAC value.
# From the obtained empirical distribution of maxima, we compute the
# 95-percentile, which corresponds to a p-value of 0.05.
#
# This method does not assume the distribution to be Gaussian, nor suffers from
# multiple-testing issues. This is the default method in the `plot` method.
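###############################################################################
# For intuition only: a minimal numpy sketch of the two thresholds above. The
# array name ``surrogate_comods`` is hypothetical (it is not a pactools
# attribute); assume it holds the surrogate comodulograms with shape
# (n_surrogates, n_low, n_high) and ``comod`` the original comodulogram:
#
#   z = (comod - surrogate_comods.mean(axis=0)) / surrogate_comods.std(axis=0)
#   threshold = np.percentile(surrogate_comods.max(axis=(1, 2)), 95)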
fig, axs = plt.subplots(1, 2, figsize=(10, 4))
z_score = 4.
estimator.plot(contour_method='z_score', contour_level=z_score,
titles=['With a z-score on each frequency pair'],
axs=[axs[0]])
p_value = 0.05
estimator.plot(contour_method='comod_max', contour_level=p_value,
titles=['With a p-value on the distribution of maxima'],
axs=[axs[1]])
plt.show()
###############################################################################
# References
#
# [Canolty et al, 2006]
# Canolty, R. T., Edwards, E., Dalal, S. S., Soltani, M., Nagarajan,
# S. S., Kirsch, H. E., ... & Knight, R. T. (2006). High gamma power is
# phase-locked to theta oscillations in human neocortex. science,
# 313(5793), 1626-1628.
|
pactools/pactools
|
examples/plot_surrogate_analysis.py
|
Python
|
bsd-3-clause
| 4,809
|
[
"Gaussian"
] |
894e05e4699605f972b6504805afa367b79f1a6c23a93edd30f5bd4ec21e4b58
|
#!/usr/bin/env python3
#
# Copyright 2022 David Fort <contact@hardening-consulting.com>
#
# This script is meant to parse some FreeRDP logs in DEBUG mode (WLOG_LEVEL=DEBUG) and interpret the
# smartcard traffic, dissecting the PIV or GIDS commands
#
# usage:
# * live: WLOG_LEVEL=DEBUG xfreerdp <args with smartcard> | python3 smartcard-interpreter.py
# * on an existing log file: python3 smartcard-interpreter.py <log file>
#
import sys
import codecs
CMD_NAMES = {
0x04: "DESACTIVATE FILE",
0x0C: "ERASE RECORD",
0x0E: "ERASE BINARY",
0x0F: "ERASE BINARY",
0x20: "VERIFY",
0x21: "VERIFY",
0x22: "MSE",
0x24: "CHANGE REFERENCE DATA",
0x25: "MSE",
0x26: "DISABLE VERIFICATION REQUIREMENT",
0x28: "ENABLE VERIFICATION REQUIREMENT",
0x2A: "PSO",
0x2C: "RESET RETRY COUNTER",
0x2D: "RESET RETRY COUNTER",
0x44: "ACTIVATE FILE",
0x46: "GENERATE ASYMMETRIC KEY PAIR",
0x47: "GENERATE ASYMMETRIC KEY PAIR",
0x84: "GET CHALLENGE",
0x86: "GENERAL AUTHENTICATE",
0x87: "GENERAL AUTHENTICATE",
0x88: "INTERNAL AUTHENTICATE",
0xA0: "SEARCH BINARY",
0xA1: "SEARCH BINARY",
0xA2: "SEARCH RECORD",
0xA4: "SELECT",
0xB0: "READ BINARY",
0xB1: "READ BINARY",
0xB2: "READ RECORD",
0xB3: "READ RECORD",
0xC0: "GET RESPONSE",
0xC2: "ENVELOPE",
0xC3: "ENVELOPE",
0xCA: "GET DATA",
0xCB: "GET DATA",
0xD0: "WRITE BINARY",
0xD1: "WRITE BINARY",
0xD2: "WRITE RECORD",
0xD6: "UPDATE BINARY",
0xD7: "UPDATE BINARY",
0xDA: "PUT DATA",
0xDB: "PUT DATA",
0xDC: "UPDATE RECORD",
0xDD: "UPDATE RECORD",
0xE0: "CREATE FILE",
0xE2: "APPEND RECORD",
0xE4: "DELETE FILE",
0xE6: "TERMINATE DF",
0xE8: "TERMINATE EF",
0xFE: "TERMINATE CARD USAGE",
}
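# For reference (illustrative, mirroring the command handling in __main__):
# a command APDU starts with the 4-byte header CLA INS P1 P2, e.g. the bytes
# 00 A4 04 00 decode as INS 0xA4 -> "SELECT" with P1=0x04 (select by AID).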
AIDs = {
"a00000039742544659": "MsGidsAID",
"a000000308": "PIV",
"a0000003974349445f0100": "SC PNP",
}
FIDs = {
0x0000: "Current EF",
0x2F00: "EF.DIR",
0x2F01: "EF.ATR",
0x3FFF: "Current application(ADF)",
}
DOs = {
"df1f": "DO_FILESYSTEMTABLE",
"df20": "DO_CARDID",
"df21": "DO_CARDAPPS",
"df22": "DO_CARDCF",
"df23": "DO_CMAPFILE",
"df24": "DO_KXC00",
}
ERROR_CODES = {
0x9000: "success",
0x6282: "end of file or record",
0x63C0: "warning counter 0",
0x63C1: "warning counter 1",
0x63C2: "warning counter 2",
0x63C3: "warning counter 3",
0x63C4: "warning counter 4",
0x63C5: "warning counter 5",
0x63C6: "warning counter 6",
0x63C7: "warning counter 7",
0x63C8: "warning counter 8",
0x63C9: "warning counter 9",
0x6982: "security status not satisfied",
0x6985: "condition of use not satisfied",
0x6A80: "incorrect parameter cmd data field",
0x6A81: "function not supported",
0x6A82: "file or application not found",
0x6A83: "record not found",
0x6A88: "reference data not found",
0x6D00: "unsupported",
}
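# For reference (illustrative, mirroring the response handling in __main__):
# an APDU response ends with the two status bytes SW1 SW2, and the 16-bit
# status word is SW1 * 256 + SW2, e.g.
#   sw1, sw2 = 0x90, 0x00
#   status = sw1 * 256 + sw2   # 0x9000 -> "success" in ERROR_CODES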
PIV_OIDs = {
"5fc101": "X.509 Certificate for Card Authentication",
"5fc102": "Card Holder Unique Identifier",
"5fc103": "Cardholder Fingerprints",
"5fc105": "X.509 Certificate for PIV Authentication",
"5fc106": "Security Object",
"5fc107": "Card Capability Container",
"5fc108": "Cardholder Facial Image",
"5fc10a": "X.509 Certificate for Digital Signature",
"5fc10b": "X.509 Certificate for Key Management",
"5fc10d": "Retired X.509 Certificate for Key Management 1",
"5fc10e": "Retired X.509 Certificate for Key Management 2",
"5fc10f": "Retired X.509 Certificate for Key Management 3",
}
class ApplicationDummy(object):
def __init__(self, aid):
self.aid = aid
def getAID(self):
return self.aid
def selectResult(self, fci, status, body):
return 'selectResult(%s, %s, %s)\n' %(fci, status, body.hex())
def getData(self, fileId, bytes):
return 'getData(0x%x, %s)\n' %(fileId, bytes.hex())
def getDataResult(self, status, body):
return 'getDataResult(0x%x, %s)\n' %(status, body.hex())
def mse(self, body):
return body.hex()
def mseResult(self, status, body):
return body.hex()
def pso(self, body):
return body.hex()
def psoResult(self, status, body):
return body.hex()
class ApplicationPIV(object):
def __init__(self, aid):
self.lastGet = None
self.aid = aid
def getAID(self):
return self.aid
def selectResult(self, selectT, status, body):
ret = ''
appTag = body[0]
appLen = body[1]
body = body[2:2+appLen]
while len(body) > 2:
tag = body[0]
tagLen = body[1]
if selectT == "FCI":
if tag == 0x4f:
ret += "\tpiv version: %s\n" % body[2:2 + tagLen].hex()
elif tag == 0x79:
subBody = body[2:2 + tagLen]
subTag = subBody[0]
subLen = subBody[1]
content = subBody.hex()
if subTag == 0x4f:
v = content[4:]
if v.startswith('a000000308'):
content = 'NIST RID'
ret += '\tCoexistent tag allocation authority: %s\n' % content
elif tag == 0x50:
ret += '\tapplication label\n'
elif tag == 0xac:
ret += '\tCryptographic algorithms supported\n'
else:
ret += '\tunknown tag 0x%x\n' % tag
else:
ret += "\tTODO: selectType %s\n" % selectT
body = body[2+tagLen:]
return ret
def getData(self, fileId, bytes):
ret = "\tfileId=%s\n" % FIDs.get(fileId, "%0.4x" % fileId)
lc = bytes[4]
tag = bytes[5]
tagLen = bytes[6]
if lc == 4:
ret += "\tdoId=%0.4x\n"% (bytes[7] * 256 + bytes[8])
elif lc == 0xa:
# TLV: key reference entry followed by value entry
i = 7
tag = bytes[i]
tagLen = bytes[i+1]
keyRef = bytes[i+3]
keyStr = "key(tag=0x%x len=%d ref=0x%x)=" % (tag, tagLen, keyRef)
i = i + 2 + tagLen
tag = bytes[i]
tagLen = bytes[i+1]
keyStr += "value(tag=0x%x len=%d)" % (tag, tagLen)
ret += "\t%s\n" % keyStr
elif lc == 5:
if tag == 0x5C:
tagStr = bytes[7:].hex()
ret += '\ttag: %s(%s)\n' % (tagStr, PIV_OIDs.get(tagStr, '<unknown>'))
self.lastGet = tagStr
else:
ret += "\tunknown key access\n"
return ret
def getDataResult(self, status, body):
ret = ''
if not len(body):
return ''
appTag = body[0]
appLen = body[1]
body = body[2:2+appLen]
while len(body) > 2:
tag = body[0]
tagLen = body[1]
tagBody = body[2:2+tagLen]
if self.lastGet in ('5fc102',):
# Card holder Unique Identifier
if tag == 0x30:
ret += '\tFASC-N: %s\n' % tagBody.hex()
elif tag == 0x34:
ret += '\tGUID: %s\n' % tagBody.hex()
elif tag == 0x35:
ret += '\texpirationDate: %s\n' % tagBody.decode('utf8')
elif tag == 0x3e:
ret += '\tIssuer Asymmetric Signature: %s\n' % tagBody.hex()
else:
ret += "\tunknown tag=0x%x len=%d content=%s\n" % (tag, tagLen, tagBody.hex())
else:
ret += "\t%s: unknown tag=0x%x len=%d content=%s\n" % (self.lastGet, tag, tagLen, tagBody.hex())
body = body[2+tagLen:]
return ret
def mse(self, body):
return body.hex()
def mseResult(self, status, body):
return body.hex()
def pso(self, body):
return body.hex()
def psoResult(self, status, body):
return body.hex()
class ApplicationGids(object):
def __init__(self, aid):
self.aid = aid
self.lastDo = None
def getAID(self):
return self.aid
def parseFcp(self, bytes):
ret = ''
tag = bytes[0]
tagLen = bytes[1]
body = bytes[2:2+tagLen]
if tag == 0x62:
ret += '\tFCP\n'
while len(body) > 2:
tag2 = body[0]
tag2Len = body[1]
tag2Body = body[2:2+tag2Len]
if tag2 == 0x82:
ret += '\t\tFileDescriptor: %s\n' % tag2Body.hex()
elif tag2 == 0x8a:
ret += '\t\tLifeCycleByte: %s\n' % tag2Body.hex()
elif tag2 == 0x84:
ret += '\t\tDF name: %s\n' % tag2Body.decode('utf8')
elif tag2 == 0x8C:
ret += '\t\tSecurityAttributes: %s\n' % tag2Body.hex()
else:
ret += '\t\tunhandled tag=0x%x body=%s\n' % (tag2, tag2Body.hex())
body = body[2+tag2Len:]
return ret
def parseFci(self, bytes):
ret = ''
tag = bytes[0]
tagLen = bytes[1]
body = bytes[2:2+tagLen]
if tag == 0x61:
ret += '\tFCI\n'
while len(body) > 2:
tag2 = body[0]
tag2Len = body[1]
tag2Body = body[2:2+tag2Len]
if tag2 == 0x4F:
ret += '\t\tApplication AID: %s\n' % tag2Body.hex()
elif tag2 == 0x50:
ret += '\t\tApplication label: %s\n' % tag2Body.decode('utf8')
elif tag2 == 0x73:
body2 = tag2Body
tokens = []
while len(body2) > 2:
tag3 = body2[0]
tag3Len = body2[1]
if tag3 == 0x40:
v = body2[2]
if v & 0x80:
tokens.append('mutualAuthSymAlgo')
if v & 0x40:
tokens.append('extAuthSymAlgo')
if v & 0x20:
tokens.append('keyEstabIntAuthECC')
body2 = body2[2+tag3Len:]
ret += '\t\tDiscretionary data objects: %s\n' % ",".join(tokens)
else:
ret += '\t\tunhandled tag=0x%x body=%s\n' % (tag2, tag2Body.hex())
body = body[2+tag2Len:]
return ret
def selectResult(self, selectT, status, body):
if not len(body):
return ''
if selectT == 'FCP':
return self.parseFcp(body)
elif selectT == 'FCI':
return self.parseFci(body)
else:
return '\tselectResult(%s, %s, %s)\n' % (selectT, status, body.hex())
def getData(self, fileId, bytes):
lc = bytes[4]
tag = bytes[5]
tagLen = bytes[6]
if tag == 0x5c:
doStr = bytes[7:7+tagLen].hex()
ret = '\tDO=%s\n' % DOs.get(doStr, "<%s>" % doStr)
self.lastDo = doStr
else:
ret = '\tunknown tag=0%x len=%d v=%s' % (tag, tagLen, bytes[7:7+tagLen].hex())
return ret
def getDataResult(self, status, body):
ret = ''
'''
while len(body) > 2:
tag = body[0]
tagLen = body[1]
ret += '\ttag=0x%x len=%d content=%s\n' % (tag, tagLen, body[2:2+tagLen].hex())
body = body[2+tagLen:]
'''
return ret
def mse(self, body):
return body.hex()
def mseResult(self, status, body):
return body.hex()
def pso(self, body):
return body.hex()
def psoResult(self, status, body):
return body.hex()
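# Illustrative helper (an aside, not used by the classes above): the manual
# loops in parseFcp/parseFci walk the same flat one-byte-tag/one-byte-length
# TLV layout, which could be factored out along these lines:
def iter_tlv(body):
    """Yield (tag, value) pairs from a flat TLV buffer."""
    while len(body) > 2:
        tag, tag_len = body[0], body[1]
        yield tag, body[2:2 + tag_len]
        body = body[2 + tag_len:]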
def createAppByAid(aid):
if aid == "a000000308":
return ApplicationPIV(aid)
elif aid in ('a00000039742544659',):
return ApplicationGids(aid)
return ApplicationDummy(aid)
if __name__ == '__main__':
if len(sys.argv) > 1:
fin = open(sys.argv[1], "r")
else:
fin = sys.stdin
lineno = 0
lastCmd = 0
lastSelect = None
lastSelectT = None
lastGetItem = None
currentApp = None
for l in fin.readlines():
lineno += 1
if not len(l):
continue
# smartcard loggers have changed
#if l.find("[DEBUG][com.freerdp.channels.smartcard.client]") == -1:
# continue
body = ''
recvKey = 'pbRecvBuffer: { '
pos = l.find(recvKey)
if pos != -1:
toCard = False
pos += len(recvKey)
pos2 = l.find(' }', pos)
if pos2 == -1:
print("line %d: invalid recvBuffer")
continue
else:
toCard = True
sendKey = 'pbSendBuffer: { '
pos = l.find(sendKey)
if pos == -1:
continue
pos += len(sendKey)
pos2 = l.find(' }', pos)
if pos2 == -1:
print("line %d: invalid sendBuffer")
continue
body = l[pos:pos2]
print(l[0:-1])
bytes = codecs.decode(body, 'hex')
if toCard:
(cla, ins, p1, p2) = bytes[0:4]
cmdName = CMD_NAMES.get(ins, "<COMMAND 0x%x>" % ins)
print(cmdName + ":")
if cmdName == "SELECT":
lc = bytes[4]
i = 5
if p1 == 0x00:
print("\tselectByFID: %0.2x%0.2x" % (bytes[i], bytes[i+1]))
i = i + lc
elif p1 == 0x4:
aid = bytes[i:i+lc].hex()
lastSelect = AIDs.get(aid, '')
print("\tselectByAID: %s(%s)" % (aid, lastSelect))
if p2 == 0x00:
lastSelectT = "FCI"
print('\tFCI')
elif p2 == 0x04:
print('\tFCP')
lastSelectT = "FCP"
elif p2 == 0x08:
print('\tFMD')
lastSelectT = "FMD"
if not currentApp or currentApp.getAID() != aid:
currentApp = createAppByAid(aid)
elif cmdName == "VERIFY":
lc = bytes[4]
P2_DATA_QUALIFIER = {
0x00: "Card global password",
0x01: "RFU",
0x80: "Application password",
0x81: "Application resetting password",
0x82: "Application security status resetting code",
}
pin=''
if lc:
pin = ", pin='" + bytes[5:5+lc-2].decode('utf8)') + "'"
print("\t%s%s" % (P2_DATA_QUALIFIER.get(p2, "<unknown>"), pin))
elif cmdName == "GET DATA":
lc = bytes[4]
fileId = p1 * 256 + p2
ret = currentApp.getData(fileId, bytes)
print("%s" % ret)
elif cmdName == "MSE":
ret = currentApp.mse(bytes[5:5+lc])
print("%s" % ret)
elif cmdName == "PSO":
ret = currentApp.pso(bytes[5:5+lc])
print("%s" % ret)
else:
print('handle %s' % cmdName)
lastCmd = cmdName
else:
# Responses
status = bytes[-1] + bytes[-2] * 256
body = bytes[0:-2]
print("status=0x%0.4x(%s)" % (status, ERROR_CODES.get(status, "<unknown>")))
if not len(body):
continue
ret = ''
if lastCmd == "SELECT":
ret = currentApp.selectResult(lastSelectT, status, body)
elif lastCmd == "GET DATA":
ret = currentApp.getDataResult(status, body)
elif lastCmd == "MSE":
ret = currentApp.mseResult(status, body)
elif lastCmd == "PSO":
ret = currentApp.psoResult(status, body)
if ret:
print("%s" % ret)
|
awakecoding/FreeRDP
|
tools/smartcard-interpreter.py
|
Python
|
apache-2.0
| 17,389
|
[
"ADF"
] |
12e2af7d097e12be3fcc5a44f94178ac270f0a2c37caa5ce8505ae190f0c5323
|
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Prabhu Ramachandran
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance
# Local imports.
from mayavi.components.contour import Contour as ContourComponent
from mayavi.core.pipeline_info import PipelineInfo
from mayavi.filters.wrapper import Wrapper
################################################################################
# `Contour` class.
################################################################################
class Contour(Wrapper):
"""
A contour filter that wraps around the Contour component to generate
iso-surfaces on any input dataset.
"""
# The version of this class. Used for persistence.
__version__ = 0
# The contour component this wraps.
filter = Instance(ContourComponent, args=(), record=True)
input_info = PipelineInfo(datasets=['any'],
attribute_types=['point'],
attributes=['any'])
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
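# Example usage (an illustrative sketch, assuming the standard mlab pipeline
# helpers wrap this filter):
#   from mayavi import mlab
#   src = mlab.pipeline.scalar_field(x, y, z, s)
#   iso = mlab.pipeline.contour(src)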
|
dmsurti/mayavi
|
mayavi/filters/contour.py
|
Python
|
bsd-3-clause
| 1,197
|
[
"Mayavi"
] |
749f3ec702dec1244722d8d1bfb60562aee032d8c7b95f7fbe6e7f2608f964ef
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
from skbio.util._decorator import classproperty, stable, classonlymethod
from skbio._base import SkbioObject
from skbio.sequence import Protein, RNA
from skbio._base import ElasticLines
class GeneticCode(SkbioObject):
"""Genetic code for translating codons to amino acids.
Parameters
----------
amino_acids : consumable by ``skbio.Protein`` constructor
64-character vector containing IUPAC amino acid characters. The order
of the amino acids should correspond to NCBI's codon order (see *Notes*
section below). `amino_acids` is the "AAs" field in NCBI's genetic
code format [1]_.
starts : consumable by ``skbio.Protein`` constructor
64-character vector containing only M and - characters, with start
codons indicated by M. The order of the entries should correspond
to NCBI's codon order (see *Notes* section below). `starts` is the
"Starts" field in NCBI's genetic code format [1]_.
name : str, optional
Genetic code name. This is simply metadata and does not affect the
functionality of the genetic code itself.
See Also
--------
RNA.translate
DNA.translate
GeneticCode.from_ncbi
Notes
-----
The genetic codes available via ``GeneticCode.from_ncbi`` and used
throughout the examples are defined in [1]_. The genetic code strings
defined there are directly compatible with the ``GeneticCode`` constructor.
The order of `amino_acids` and `starts` should correspond to NCBI's codon
order, defined in [1]_::
UUUUUUUUUUUUUUUUCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
UUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGG
UCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAG
Note that scikit-bio displays this ordering using the IUPAC RNA alphabet,
while NCBI displays this same ordering using the IUPAC DNA alphabet (for
historical purposes).
References
----------
.. [1] http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
Examples
--------
Get NCBI's standard genetic code (table ID 1, the default genetic code
in scikit-bio):
>>> from skbio import GeneticCode
>>> GeneticCode.from_ncbi()
GeneticCode (Standard)
-------------------------------------------------------------------------
AAs = FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
Starts = ---M---------------M---------------M----------------------------
Base1 = UUUUUUUUUUUUUUUUCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
Base2 = UUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGG
Base3 = UCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAG
Get a different NCBI genetic code (25):
>>> GeneticCode.from_ncbi(25)
GeneticCode (Candidate Division SR1 and Gracilibacteria)
-------------------------------------------------------------------------
AAs = FFLLSSSSYY**CCGWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
Starts = ---M-------------------------------M---------------M------------
Base1 = UUUUUUUUUUUUUUUUCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
Base2 = UUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGG
Base3 = UCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAG
Define a custom genetic code:
>>> GeneticCode('M' * 64, '-' * 64)
GeneticCode
-------------------------------------------------------------------------
AAs = MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
Starts = ----------------------------------------------------------------
Base1 = UUUUUUUUUUUUUUUUCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
Base2 = UUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGG
Base3 = UCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAG
Translate an RNA sequence to protein using NCBI's standard genetic code:
>>> from skbio import RNA
>>> rna = RNA('AUGCCACUUUAA')
>>> GeneticCode.from_ncbi().translate(rna)
Protein
--------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 MPL*
"""
_num_codons = 64
_radix_multiplier = np.asarray([16, 4, 1], dtype=np.uint8)
_start_stop_options = ['ignore', 'optional', 'require']
__offset_table = None
@classproperty
def _offset_table(cls):
if cls.__offset_table is None:
# create lookup table that is filled with 255 everywhere except for
# indices corresponding to U, C, A, and G. 255 was chosen to
# represent invalid character offsets because it will create an
# invalid (out of bounds) index into `amino_acids` which should
# error noisily. this is important in case the valid definite
# IUPAC RNA characters change in the future and the assumptions
# currently made by the code become invalid
table = np.empty(ord(b'U') + 1, dtype=np.uint8)
table.fill(255)
table[ord(b'U')] = 0
table[ord(b'C')] = 1
table[ord(b'A')] = 2
table[ord(b'G')] = 3
cls.__offset_table = table
return cls.__offset_table
@classonlymethod
@stable(as_of="0.4.0")
def from_ncbi(cls, table_id=1):
"""Return NCBI genetic code specified by table ID.
Parameters
----------
table_id : int, optional
Table ID of the NCBI genetic code to return.
Returns
-------
GeneticCode
NCBI genetic code specified by `table_id`.
Notes
-----
The table IDs and genetic codes available in this method and used
throughout the examples are defined in [1]_.
References
----------
.. [1] http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
Examples
--------
Get the NCBI thraustochytrium mitochondrial genetic code (23):
>>> tmgc = GeneticCode.from_ncbi(23)
>>> tmgc.name
'Thraustochytrium Mitochondrial'
"""
if table_id not in _ncbi_genetic_codes:
raise ValueError(
"`table_id` must be one of %r, not %r"
% (sorted(_ncbi_genetic_codes), table_id))
return _ncbi_genetic_codes[table_id]
@classproperty
@stable(as_of="0.4.0")
def reading_frames(cls):
"""Six possible reading frames.
Reading frames are ordered:
* 1 (forward)
* 2 (forward)
* 3 (forward)
* -1 (reverse)
* -2 (reverse)
* -3 (reverse)
This property can be passed into
``GeneticCode.translate(reading_frame)``.
Returns
-------
list (int)
Six possible reading frames.
"""
return [1, 2, 3, -1, -2, -3]
@property
@stable(as_of="0.4.0")
def name(self):
"""Genetic code name.
This is simply metadata and does not affect the functionality of the
genetic code itself.
Returns
-------
str
Genetic code name.
"""
return self._name
@stable(as_of="0.4.0")
def __init__(self, amino_acids, starts, name=''):
self._set_amino_acids(amino_acids)
self._set_starts(starts)
self._name = name
def _set_amino_acids(self, amino_acids):
amino_acids = Protein(amino_acids)
if len(amino_acids) != self._num_codons:
raise ValueError("`amino_acids` must be length %d, not %d"
% (self._num_codons, len(amino_acids)))
indices = (amino_acids.values == b'M').nonzero()[0]
if indices.size < 1:
raise ValueError("`amino_acids` must contain at least one M "
"(methionine) character")
self._amino_acids = amino_acids
self._m_character_codon = self._index_to_codon(indices[0])
def _set_starts(self, starts):
starts = Protein(starts)
if len(starts) != self._num_codons:
raise ValueError("`starts` must be length %d, not %d"
% (self._num_codons, len(starts)))
if ((starts.values == b'M').sum() + (starts.values == b'-').sum() !=
len(starts)):
# to prevent the user from accidentally swapping `starts` and
# `amino_acids` and getting a translation back
raise ValueError("`starts` may only contain M and - characters")
self._starts = starts
indices = (self._starts.values == b'M').nonzero()[0]
codons = np.empty((indices.size, 3), dtype=np.uint8)
for i, index in enumerate(indices):
codons[i] = self._index_to_codon(index)
self._start_codons = codons
def _index_to_codon(self, index):
"""Convert AA index (0-63) to codon encoded in offsets (0-3)."""
codon = np.empty(3, dtype=np.uint8)
for i, multiplier in enumerate(self._radix_multiplier):
offset, index = divmod(index, multiplier)
codon[i] = offset
return codon
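    # Worked example (illustrative comment, added): _index_to_codon(35)
    # computes divmod(35, 16) -> (2, 3), divmod(3, 4) -> (0, 3), and
    # divmod(3, 1) -> (3, 0), yielding offsets [2, 0, 3]. Under the mapping
    # U=0, C=1, A=2, G=3 that is the codon AUG, and index 35 in the
    # standard code's `amino_acids` is indeed 'M'.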
@stable(as_of="0.4.0")
def __str__(self):
"""Return string representation of the genetic code.
Returns
-------
str
Genetic code in NCBI genetic code format.
Notes
-----
Representation uses NCBI genetic code format defined in [1]_.
References
----------
.. [1] http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
"""
return self._build_repr(include_name=False)
@stable(as_of="0.4.0")
def __repr__(self):
"""Return string representation of the genetic code.
Returns
-------
str
Genetic code in NCBI genetic code format.
Notes
-----
Representation uses NCBI genetic code format defined in [1]_ preceded
by a header. If the genetic code has a name, it will be included in the
header.
References
----------
.. [1] http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
"""
return self._build_repr(include_name=True)
def _build_repr(self, include_name):
lines = ElasticLines()
if include_name:
name_line = self.__class__.__name__
if len(self.name) > 0:
name_line += ' (%s)' % self.name
lines.add_line(name_line)
lines.add_separator()
lines.add_line(' AAs = %s' % str(self._amino_acids))
lines.add_line('Starts = %s' % str(self._starts))
base1 = 'U' * 16 + 'C' * 16 + 'A' * 16 + 'G' * 16
lines.add_line('Base1 = %s' % base1)
base2 = ('U' * 4 + 'C' * 4 + 'A' * 4 + 'G' * 4) * 4
lines.add_line('Base2 = %s' % base2)
base3 = 'UCAG' * 16
lines.add_line('Base3 = %s' % base3)
return lines.to_str()
@stable(as_of="0.4.0")
def __eq__(self, other):
"""Determine if the genetic code is equal to another.
Genetic codes are equal if they are *exactly* the same type and
defined by the same `amino_acids` and `starts`. A genetic code's name
(accessed via ``name`` property) does not affect equality.
Parameters
----------
other : GeneticCode
Genetic code to test for equality against.
Returns
-------
bool
Indicates whether the genetic code is equal to `other`.
Examples
--------
NCBI genetic codes 1 and 2 are not equal:
>>> GeneticCode.from_ncbi(1) == GeneticCode.from_ncbi(2)
False
Define a custom genetic code:
>>> gc = GeneticCode('M' * 64, '-' * 64)
Define a second genetic code with the same `amino_acids` and `starts`.
Note that the presence of a name does not make the genetic codes
unequal:
>>> named_gc = GeneticCode('M' * 64, '-' * 64, name='example name')
>>> gc == named_gc
True
"""
if self.__class__ != other.__class__:
return False
# convert Protein to str so that metadata is ignored in comparison. we
# only care about the sequence data defining the genetic code
if str(self._amino_acids) != str(other._amino_acids):
return False
if str(self._starts) != str(other._starts):
return False
return True
@stable(as_of="0.4.0")
def __ne__(self, other):
"""Determine if the genetic code is not equal to another.
Genetic codes are not equal if their type, `amino_acids`, or `starts`
differ. A genetic code's name (accessed via ``name`` property) does not
affect equality.
Parameters
----------
other : GeneticCode
Genetic code to test for inequality against.
Returns
-------
bool
Indicates whether the genetic code is not equal to `other`.
"""
return not (self == other)
@stable(as_of="0.4.0")
def translate(self, sequence, reading_frame=1, start='ignore',
stop='ignore'):
"""Translate RNA sequence into protein sequence.
Parameters
----------
sequence : RNA
RNA sequence to translate.
reading_frame : {1, 2, 3, -1, -2, -3}
Reading frame to use in translation. 1, 2, and 3 are forward frames
and -1, -2, and -3 are reverse frames. If reverse (negative), will
reverse complement the sequence before translation.
start : {'ignore', 'require', 'optional'}
How to handle start codons:
* "ignore": translation will start from the beginning of the
reading frame, regardless of the presence of a start codon.
* "require": translation will start at the first start codon in
the reading frame, ignoring all prior positions. The first amino
acid in the translated sequence will *always* be methionine
(M character), even if an alternative start codon was used in
translation. This behavior most closely matches the underlying
biology since fMet doesn't have a corresponding IUPAC character.
If a start codon does not exist, a ``ValueError`` is raised.
* "optional": if a start codon exists in the reading frame, matches
the behavior of "require". If a start codon does not exist,
matches the behavior of "ignore".
stop : {'ignore', 'require', 'optional'}
How to handle stop codons:
* "ignore": translation will ignore the presence of stop codons and
translate to the end of the reading frame.
* "require": translation will terminate at the first stop codon.
The stop codon will not be included in the translated sequence.
If a stop codon does not exist, a ``ValueError`` is raised.
* "optional": if a stop codon exists in the reading frame, matches
the behavior of "require". If a stop codon does not exist,
matches the behavior of "ignore".
Returns
-------
Protein
Translated sequence.
See Also
--------
translate_six_frames
Notes
-----
Input RNA sequence metadata are included in the translated protein
sequence. Positional metadata are not included.
Examples
--------
Translate RNA into protein using NCBI's standard genetic code (table ID
1, the default genetic code in scikit-bio):
>>> from skbio import RNA, GeneticCode
>>> rna = RNA('AGUAUUCUGCCACUGUAAGAA')
>>> sgc = GeneticCode.from_ncbi()
>>> sgc.translate(rna)
Protein
--------------------------
Stats:
length: 7
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 SILPL*E
In this command, we used the default ``start`` behavior, which starts
translation at the beginning of the reading frame, regardless of the
presence of a start codon. If we specify "require", translation will
start at the first start codon in the reading frame (in this example,
CUG), ignoring all prior positions:
>>> sgc.translate(rna, start='require')
Protein
--------------------------
Stats:
length: 5
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 MPL*E
Note that the codon coding for L (CUG) is an alternative start codon in
this genetic code. Since we specified "require" mode, methionine (M)
was used in place of the alternative start codon (L). This behavior
most closely matches the underlying biology since fMet doesn't have a
corresponding IUPAC character.
Translate the same RNA sequence, also specifying that translation
terminate at the first stop codon in the reading frame:
>>> sgc.translate(rna, start='require', stop='require')
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 MPL
Passing "require" to both ``start`` and ``stop`` trims the translation
to the CDS (and in fact requires that one is present in the reading
frame). Changing the reading frame to 2 causes an exception to be
raised because a start codon doesn't exist in the reading frame:
>>> sgc.translate(rna, start='require', stop='require',
... reading_frame=2) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: ...
"""
self._validate_translate_inputs(sequence, reading_frame, start, stop)
offset = abs(reading_frame) - 1
if reading_frame < 0:
sequence = sequence.reverse_complement()
# Translation strategy:
#
# 1. Obtain view of underlying sequence bytes from the beginning of
# the reading frame.
# 2. Convert bytes to offsets (0-3, base 4 since there are only 4
# characters allowed: UCAG).
# 3. Reshape byte vector into (N, 3), where N is the number of codons
# in the reading frame. Each row represents a codon in the
# sequence.
# 4. (Optional) Find start codon in the reading frame and trim to
# this position. Replace start codon with M codon.
# 5. Convert each codon (encoded as offsets) into an index
# corresponding to an amino acid (0-63).
# 6. Obtain translated sequence by indexing into the amino acids
# vector (`amino_acids`) using the indices defined in step 5.
# 7. (Optional) Find first stop codon and trim to this position.
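        # Illustrative example (added comment): for the codon AUG the
        # offsets are [2, 0, 3], so step 5 computes 2*16 + 0*4 + 3*1 = 35
        # and step 6 reads amino_acids[35], which is 'M' in the standard
        # genetic code.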
data = sequence.values[offset:].view(np.uint8)
# since advanced indexing is used with an integer ndarray, a copy is
# always returned. thus, the in-place modification made below
# (replacing the start codon) is safe.
data = self._offset_table[data]
data = data[:data.size // 3 * 3].reshape((-1, 3))
if start in {'require', 'optional'}:
start_codon_index = data.shape[0]
for start_codon in self._start_codons:
indices = np.all(data == start_codon, axis=1).nonzero()[0]
if indices.size > 0:
first_index = indices[0]
if first_index < start_codon_index:
start_codon_index = first_index
if start_codon_index != data.shape[0]:
data = data[start_codon_index:]
data[0] = self._m_character_codon
elif start == 'require':
self._raise_require_error('start', reading_frame)
indices = (data * self._radix_multiplier).sum(axis=1)
translated = self._amino_acids.values[indices]
if stop in {'require', 'optional'}:
stop_codon_indices = (translated == b'*').nonzero()[0]
if stop_codon_indices.size > 0:
translated = translated[:stop_codon_indices[0]]
elif stop == 'require':
self._raise_require_error('stop', reading_frame)
metadata = None
if sequence.has_metadata():
metadata = sequence.metadata
# turn off validation because `translated` is guaranteed to be valid
return Protein(translated, metadata=metadata, validate=False)
def _validate_translate_inputs(self, sequence, reading_frame, start, stop):
if not isinstance(sequence, RNA):
raise TypeError("Sequence to translate must be RNA, not %s" %
type(sequence).__name__)
if reading_frame not in self.reading_frames:
raise ValueError("`reading_frame` must be one of %r, not %r" %
(self.reading_frames, reading_frame))
for name, value in ('start', start), ('stop', stop):
if value not in self._start_stop_options:
raise ValueError("`%s` must be one of %r, not %r" %
(name, self._start_stop_options, value))
if sequence.has_gaps():
raise ValueError("scikit-bio does not support translation of "
"gapped sequences.")
if sequence.has_degenerates():
            raise NotImplementedError("scikit-bio does not currently support "
                                      "translation of degenerate sequences. "
                                      "`RNA.expand_degenerates` can be used "
                                      "to obtain all definite versions "
                                      "of a degenerate sequence.")
def _raise_require_error(self, name, reading_frame):
raise ValueError(
"Sequence does not contain a %s codon in the "
"current reading frame (`reading_frame=%d`). Presence "
"of a %s codon is required with `%s='require'`"
% (name, reading_frame, name, name))
@stable(as_of="0.4.0")
def translate_six_frames(self, sequence, start='ignore', stop='ignore'):
"""Translate RNA into protein using six possible reading frames.
The six possible reading frames are:
* 1 (forward)
* 2 (forward)
* 3 (forward)
* -1 (reverse)
* -2 (reverse)
* -3 (reverse)
Translated sequences are yielded in this order.
Parameters
----------
sequence : RNA
RNA sequence to translate.
start : {'ignore', 'require', 'optional'}
How to handle start codons. See ``GeneticCode.translate`` for
details.
stop : {'ignore', 'require', 'optional'}
How to handle stop codons. See ``GeneticCode.translate`` for
details.
Yields
------
Protein
Translated sequence in the current reading frame.
See Also
--------
translate
Notes
-----
This method is faster than (and equivalent to) performing six
independent translations using, for example:
``(gc.translate(seq, reading_frame=rf)
for rf in GeneticCode.reading_frames)``
Input RNA sequence metadata are included in each translated protein
sequence. Positional metadata are not included.
Examples
--------
Translate RNA into protein using the six possible reading frames and
NCBI's standard genetic code (table ID 1, the default genetic code in
scikit-bio):
>>> from skbio import RNA, GeneticCode
>>> rna = RNA('AUGCCACUUUAA')
>>> sgc = GeneticCode.from_ncbi()
>>> for protein in sgc.translate_six_frames(rna):
... protein
... print('')
Protein
--------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 MPL*
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 CHF
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 ATL
<BLANKLINE>
Protein
--------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 LKWH
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 *SG
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 KVA
<BLANKLINE>
"""
rc = sequence.reverse_complement()
for reading_frame in range(1, 4):
yield self.translate(sequence, reading_frame=reading_frame,
start=start, stop=stop)
for reading_frame in range(1, 4):
yield self.translate(rc, reading_frame=reading_frame,
start=start, stop=stop)
# defined at http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
_ncbi_genetic_codes = {
1: GeneticCode(
'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
'---M---------------M---------------M----------------------------',
'Standard'),
2: GeneticCode(
'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS**VVVVAAAADDEEGGGG',
'--------------------------------MMMM---------------M------------',
'Vertebrate Mitochondrial'),
3: GeneticCode(
'FFLLSSSSYY**CCWWTTTTPPPPHHQQRRRRIIMMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
'----------------------------------MM----------------------------',
'Yeast Mitochondrial'),
4: GeneticCode(
'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
'--MM---------------M------------MMMM---------------M------------',
'Mold, Protozoan, and Coelenterate Mitochondrial, and '
'Mycoplasma/Spiroplasma'),
5: GeneticCode(
'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSSSVVVVAAAADDEEGGGG',
'---M----------------------------MMMM---------------M------------',
'Invertebrate Mitochondrial'),
6: GeneticCode(
'FFLLSSSSYYQQCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
'-----------------------------------M----------------------------',
'Ciliate, Dasycladacean and Hexamita Nuclear'),
9: GeneticCode(
'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
'-----------------------------------M---------------M------------',
'Echinoderm and Flatworm Mitochondrial'),
10: GeneticCode(
'FFLLSSSSYY**CCCWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
'-----------------------------------M----------------------------',
'Euplotid Nuclear'),
11: GeneticCode(
'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
'---M---------------M------------MMMM---------------M------------',
'Bacterial, Archaeal and Plant Plastid'),
12: GeneticCode(
'FFLLSSSSYY**CC*WLLLSPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
'-------------------M---------------M----------------------------',
'Alternative Yeast Nuclear'),
13: GeneticCode(
'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSGGVVVVAAAADDEEGGGG',
'---M------------------------------MM---------------M------------',
'Ascidian Mitochondrial'),
14: GeneticCode(
'FFLLSSSSYYY*CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
'-----------------------------------M----------------------------',
'Alternative Flatworm Mitochondrial'),
16: GeneticCode(
'FFLLSSSSYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
'-----------------------------------M----------------------------',
'Chlorophycean Mitochondrial'),
21: GeneticCode(
'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
'-----------------------------------M---------------M------------',
'Trematode Mitochondrial'),
22: GeneticCode(
'FFLLSS*SYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
'-----------------------------------M----------------------------',
'Scenedesmus obliquus Mitochondrial'),
23: GeneticCode(
'FF*LSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
'--------------------------------M--M---------------M------------',
'Thraustochytrium Mitochondrial'),
24: GeneticCode(
'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSSKVVVVAAAADDEEGGGG',
'---M---------------M---------------M---------------M------------',
'Pterobranchia Mitochondrial'),
25: GeneticCode(
'FFLLSSSSYY**CCGWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
'---M-------------------------------M---------------M------------',
'Candidate Division SR1 and Gracilibacteria')
}
|
gregcaporaso/scikit-bio
|
skbio/sequence/_genetic_code.py
|
Python
|
bsd-3-clause
| 31,064
|
[
"scikit-bio"
] |
b49a907aae1df1879d023f5e6ae38cae72eb41f5f4063845704d6a92e7dca179
|
from abc import abstractmethod, ABCMeta
import six
try:
    from collections.abc import Iterable  # Python 3.3+
except ImportError:  # Python 2 fallback
    from collections import Iterable
from collections import OrderedDict
from copy import copy
class State(six.with_metaclass(ABCMeta)):
def __init__(self, state, name=None):
"""
        Base class for state objects.
        :param state: scalar or iterable state variable
:param name: string name
"""
self.state = state
self.check_state()
self.name = name
self.label = name
@abstractmethod
def change(self):
"""
Change state to new state
:return:
"""
pass
def check_state(self):
"""
        Check if the state makes sense; raise an error if it does not
:return:
"""
pass
def __eq__(self, other):
"""
Compare two states
:param other: other state object
:return: bool
"""
if isinstance(self.state, Iterable):
return all([i == j for i, j in zip(self.state, other.state)])
else:
return self.state == other.state
def copy(self):
"""
copy a state
:return: new state object with same state variable
"""
new_state = self.__class__(copy(self.state), self.name)
# copy other attributes
# new_state.__dict__.update({i: j for i, j in self.__dict__.items() if i not in ['state', 'name']})
return new_state
class StaticState(State):
"""
StaticState does not change the state when calling the change method
"""
def change(self):
pass
class StateDict(OrderedDict):
"""
A collection of states. Usually one physical system is described by more than
one state variable
"""
    def __init__(self, states=None, **kwargs):
        # Always initialize the underlying OrderedDict, even when no
        # `states` list is given (e.g. when constructed via ``fromkeys``).
        super(StateDict, self).__init__()
        if isinstance(states, (list, tuple)):
            for s in states:
                self[s.name] = s
        # `kwargs` is always a dict; only iterate when it is non-empty.
        if kwargs:
            for key, value in kwargs.items():
                self.update({key: StaticState(value, name=key)})
def __eq__(self, other):
keys = self.keys()
return all([self[i] == other[i] for i in keys])
def copy(self):
"""
Deep copy of a StateDict
:return: new StateDict object
"""
new_state_dict = StateDict.fromkeys(self.keys())
new_state_dict.update({i: self[i].copy() for i in self.keys()})
return new_state_dict
class StateStructure(six.with_metaclass(ABCMeta)):
"""
    Structure paired with a StateDict that describes its states.
    Each structure is associated with a collection of states and can be
    converted to or from those states.
"""
def __init__(self, structure, state_dict):
"""
:param structure: pymatgen structure
:param state_dict: StateDict object
"""
self.structure = structure.copy()
self.state_dict = state_dict.copy()
@abstractmethod
def structure_from_states(self, state_dict):
"""
Convert the state into pymatgen structure
        :param state_dict: StateDict object
:return: structure corresponding to the state dictionary
"""
pass
@abstractmethod
def structure_to_states(self, structure):
"""
Convert structure to corresponding state dictionary
:param structure: pymatgen structure
:return: state dictionary
"""
pass
def to_states(self):
"""
Convert the object to state dictionary
:return: StateDict object
"""
return self.structure_to_states(self.structure)
def from_states(self, state_dict):
"""
Convert a state dictionary into structure
:param state_dict: StateDict object
:return:
"""
self.structure = self.structure_from_states(state_dict)
self.state_dict = state_dict
def change(self):
"""
Perform state changes for all items in the state dictionary and
update the structure
:return:
"""
        for state in self.state_dict.values():
            state.change()
self.from_states(self.state_dict)
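# Hedged usage sketch (added; not part of the original module). A minimal
# concrete State under the contract above: `change()` mutates `self.state`.
# The class and variable names below are hypothetical.
if __name__ == '__main__':
    class SpinState(State):
        """Toy Ising-like spin whose change() flips its +/-1 sign."""
        def change(self):
            self.state = -self.state

    spins = StateDict([SpinState(1, name='site0'), SpinState(-1, name='site1')])
    snapshot = spins.copy()          # independent deep copy of all states
    for s in spins.values():
        s.change()                   # flip every spin in place
    print(spins == snapshot)         # False: states diverged after change()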
|
czhengsci/veidt
|
veidt/monte_carlo/base.py
|
Python
|
bsd-3-clause
| 4,107
|
[
"pymatgen"
] |
9f27c1891100cb895b968a57e4e15e48d2015b037f92d7126e238fde1c154d0c
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'kek.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1048, 650)
MainWindow.setMinimumSize(QtCore.QSize(460, 300))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setLayoutDirection(QtCore.Qt.LeftToRight)
self.tabWidget.setAutoFillBackground(False)
self.tabWidget.setTabPosition(QtWidgets.QTabWidget.North)
self.tabWidget.setElideMode(QtCore.Qt.ElideNone)
self.tabWidget.setObjectName("tabWidget")
self.tab_log = QtWidgets.QWidget()
self.tab_log.setAccessibleName("")
self.tab_log.setObjectName("tab_log")
self.gridLayout_2 = QtWidgets.QGridLayout(self.tab_log)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.treeWidget = QtWidgets.QTreeWidget(self.tab_log)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.treeWidget.sizePolicy().hasHeightForWidth())
self.treeWidget.setSizePolicy(sizePolicy)
self.treeWidget.setMinimumSize(QtCore.QSize(150, 0))
self.treeWidget.setMaximumSize(QtCore.QSize(150, 16777215))
self.treeWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.treeWidget.setItemsExpandable(False)
self.treeWidget.setExpandsOnDoubleClick(False)
self.treeWidget.setObjectName("treeWidget")
self.treeWidget.headerItem().setText(0, "1")
self.treeWidget.header().setVisible(False)
self.treeWidget.header().setStretchLastSection(False)
self.gridLayout_2.addWidget(self.treeWidget, 0, 0, 7, 1)
self.line_2 = QtWidgets.QFrame(self.tab_log)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.gridLayout_2.addWidget(self.line_2, 1, 2, 1, 1)
self.team1 = QtWidgets.QTableWidget(self.tab_log)
self.team1.setMinimumSize(QtCore.QSize(430, 0))
font = QtGui.QFont()
font.setPointSize(8)
self.team1.setFont(font)
self.team1.setMouseTracking(True)
self.team1.setToolTip("")
self.team1.setToolTipDuration(12)
self.team1.setLayoutDirection(QtCore.Qt.LeftToRight)
self.team1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.team1.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.team1.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.team1.setDragDropOverwriteMode(False)
self.team1.setTextElideMode(QtCore.Qt.ElideMiddle)
self.team1.setShowGrid(True)
self.team1.setCornerButtonEnabled(False)
self.team1.setObjectName("team1")
self.team1.setColumnCount(0)
self.team1.setRowCount(0)
self.team1.horizontalHeader().setDefaultSectionSize(80)
self.team1.horizontalHeader().setMinimumSectionSize(20)
self.team1.horizontalHeader().setStretchLastSection(False)
self.team1.verticalHeader().setVisible(True)
self.team1.verticalHeader().setDefaultSectionSize(20)
self.team1.verticalHeader().setStretchLastSection(False)
self.gridLayout_2.addWidget(self.team1, 3, 2, 1, 1)
self.team2 = QtWidgets.QTableWidget(self.tab_log)
self.team2.setMinimumSize(QtCore.QSize(430, 0))
font = QtGui.QFont()
font.setPointSize(8)
self.team2.setFont(font)
self.team2.setMouseTracking(True)
self.team2.setToolTip("")
self.team2.setToolTipDuration(12)
self.team2.setLayoutDirection(QtCore.Qt.LeftToRight)
self.team2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.team2.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.team2.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.team2.setDragDropOverwriteMode(False)
self.team2.setTextElideMode(QtCore.Qt.ElideMiddle)
self.team2.setShowGrid(True)
self.team2.setCornerButtonEnabled(False)
self.team2.setObjectName("team2")
self.team2.setColumnCount(0)
self.team2.setRowCount(0)
self.team2.horizontalHeader().setDefaultSectionSize(80)
self.team2.horizontalHeader().setMinimumSectionSize(20)
self.team2.horizontalHeader().setStretchLastSection(False)
self.team2.verticalHeader().setVisible(True)
self.team2.verticalHeader().setDefaultSectionSize(20)
self.team2.verticalHeader().setStretchLastSection(False)
self.gridLayout_2.addWidget(self.team2, 6, 2, 1, 1)
self.label = QtWidgets.QLabel(self.tab_log)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 5, 2, 1, 1)
self.line = QtWidgets.QFrame(self.tab_log)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.gridLayout_2.addWidget(self.line, 4, 2, 1, 1)
self.label_2 = QtWidgets.QLabel(self.tab_log)
self.label_2.setObjectName("label_2")
self.gridLayout_2.addWidget(self.label_2, 2, 2, 1, 1)
spacerItem = QtWidgets.QSpacerItem(0, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem, 0, 1, 7, 1)
self.comboBox = QtWidgets.QComboBox(self.tab_log)
self.comboBox.setFrame(True)
self.comboBox.setObjectName("comboBox")
self.gridLayout_2.addWidget(self.comboBox, 0, 2, 1, 3)
self.compare = QtWidgets.QTableWidget(self.tab_log)
self.compare.setMaximumSize(QtCore.QSize(250, 16777215))
self.compare.setObjectName("compare")
self.compare.setColumnCount(0)
self.compare.setRowCount(0)
self.gridLayout_2.addWidget(self.compare, 2, 4, 5, 1)
spacerItem1 = QtWidgets.QSpacerItem(0, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem1, 1, 3, 6, 1)
self.line_4 = QtWidgets.QFrame(self.tab_log)
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.gridLayout_2.addWidget(self.line_4, 1, 4, 1, 1)
self.label.raise_()
self.comboBox.raise_()
self.team2.raise_()
self.line.raise_()
self.label_2.raise_()
self.line_2.raise_()
self.team1.raise_()
self.treeWidget.raise_()
self.compare.raise_()
self.line_4.raise_()
self.tabWidget.addTab(self.tab_log, "")
self.tab_player = QtWidgets.QWidget()
self.tab_player.setObjectName("tab_player")
self.gridLayout_3 = QtWidgets.QGridLayout(self.tab_player)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.table_stat = QtWidgets.QTableWidget(self.tab_player)
self.table_stat.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.table_stat.setObjectName("table_stat")
self.table_stat.setColumnCount(0)
self.table_stat.setRowCount(0)
self.table_stat.horizontalHeader().setSortIndicatorShown(False)
self.table_stat.verticalHeader().setDefaultSectionSize(20)
self.table_stat.verticalHeader().setHighlightSections(True)
self.table_stat.verticalHeader().setSortIndicatorShown(False)
self.gridLayout_3.addWidget(self.table_stat, 4, 0, 1, 7)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem2, 0, 5, 3, 1)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem3, 0, 3, 3, 1)
self.line_player3 = QtWidgets.QLineEdit(self.tab_player)
self.line_player3.setObjectName("line_player3")
self.gridLayout_3.addWidget(self.line_player3, 1, 4, 1, 1)
self.line_player4 = QtWidgets.QLineEdit(self.tab_player)
self.line_player4.setObjectName("line_player4")
self.gridLayout_3.addWidget(self.line_player4, 1, 6, 1, 1)
self.label_5 = QtWidgets.QLabel(self.tab_player)
self.label_5.setAlignment(QtCore.Qt.AlignCenter)
self.label_5.setObjectName("label_5")
self.gridLayout_3.addWidget(self.label_5, 0, 4, 1, 1)
self.label_6 = QtWidgets.QLabel(self.tab_player)
self.label_6.setAlignment(QtCore.Qt.AlignCenter)
self.label_6.setObjectName("label_6")
self.gridLayout_3.addWidget(self.label_6, 0, 6, 1, 1)
self.line_3 = QtWidgets.QFrame(self.tab_player)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.gridLayout_3.addWidget(self.line_3, 3, 0, 1, 7)
self.label_4 = QtWidgets.QLabel(self.tab_player)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName("label_4")
self.gridLayout_3.addWidget(self.label_4, 0, 2, 1, 1)
self.line_player1 = QtWidgets.QLineEdit(self.tab_player)
self.line_player1.setObjectName("line_player1")
self.gridLayout_3.addWidget(self.line_player1, 1, 0, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem4, 0, 1, 3, 1)
self.line_player2 = QtWidgets.QLineEdit(self.tab_player)
self.line_player2.setObjectName("line_player2")
self.gridLayout_3.addWidget(self.line_player2, 1, 2, 1, 1)
self.label_3 = QtWidgets.QLabel(self.tab_player)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.gridLayout_3.addWidget(self.label_3, 0, 0, 1, 1)
self.tabWidget.addTab(self.tab_player, "")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.gridLayout_4 = QtWidgets.QGridLayout(self.tab)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.groupBox_3 = QtWidgets.QGroupBox(self.tab)
self.groupBox_3.setObjectName("groupBox_3")
self.gridLayout_8 = QtWidgets.QGridLayout(self.groupBox_3)
self.gridLayout_8.setObjectName("gridLayout_8")
self.parts_have = QtWidgets.QTableWidget(self.groupBox_3)
self.parts_have.setShowGrid(True)
self.parts_have.setGridStyle(QtCore.Qt.SolidLine)
self.parts_have.setObjectName("parts_have")
self.parts_have.setColumnCount(0)
self.parts_have.setRowCount(0)
self.parts_have.horizontalHeader().setVisible(False)
self.parts_have.horizontalHeader().setDefaultSectionSize(50)
self.parts_have.verticalHeader().setDefaultSectionSize(20)
self.gridLayout_8.addWidget(self.parts_have, 0, 0, 1, 1)
self.gridLayout_4.addWidget(self.groupBox_3, 1, 0, 1, 1)
self.groupBox_4 = QtWidgets.QGroupBox(self.tab)
self.groupBox_4.setObjectName("groupBox_4")
self.gridLayout_6 = QtWidgets.QGridLayout(self.groupBox_4)
self.gridLayout_6.setObjectName("gridLayout_6")
self.materials_need = QtWidgets.QTableWidget(self.groupBox_4)
self.materials_need.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.materials_need.setShowGrid(True)
self.materials_need.setGridStyle(QtCore.Qt.SolidLine)
self.materials_need.setObjectName("materials_need")
self.materials_need.setColumnCount(0)
self.materials_need.setRowCount(0)
self.materials_need.horizontalHeader().setVisible(False)
self.materials_need.horizontalHeader().setDefaultSectionSize(50)
self.materials_need.verticalHeader().setDefaultSectionSize(20)
self.gridLayout_6.addWidget(self.materials_need, 0, 0, 1, 1)
self.gridLayout_4.addWidget(self.groupBox_4, 2, 1, 1, 1)
self.groupBox_2 = QtWidgets.QGroupBox(self.tab)
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout_7 = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout_7.setObjectName("gridLayout_7")
self.materials_have = QtWidgets.QTableWidget(self.groupBox_2)
self.materials_have.setShowGrid(True)
self.materials_have.setGridStyle(QtCore.Qt.SolidLine)
self.materials_have.setObjectName("materials_have")
self.materials_have.setColumnCount(0)
self.materials_have.setRowCount(0)
self.materials_have.horizontalHeader().setVisible(False)
self.materials_have.horizontalHeader().setDefaultSectionSize(50)
self.materials_have.verticalHeader().setDefaultSectionSize(20)
self.gridLayout_7.addWidget(self.materials_have, 0, 0, 1, 1)
self.gridLayout_4.addWidget(self.groupBox_2, 1, 1, 1, 1)
self.groupBox_5 = QtWidgets.QGroupBox(self.tab)
self.groupBox_5.setObjectName("groupBox_5")
self.gridLayout_5 = QtWidgets.QGridLayout(self.groupBox_5)
self.gridLayout_5.setObjectName("gridLayout_5")
self.parts_need = QtWidgets.QTableWidget(self.groupBox_5)
self.parts_need.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.parts_need.setShowGrid(True)
self.parts_need.setGridStyle(QtCore.Qt.SolidLine)
self.parts_need.setObjectName("parts_need")
self.parts_need.setColumnCount(0)
self.parts_need.setRowCount(0)
self.parts_need.horizontalHeader().setVisible(False)
self.parts_need.horizontalHeader().setDefaultSectionSize(50)
self.parts_need.verticalHeader().setDefaultSectionSize(20)
self.gridLayout_5.addWidget(self.parts_need, 0, 0, 1, 1)
self.gridLayout_4.addWidget(self.groupBox_5, 2, 0, 1, 1)
self.groupBox = QtWidgets.QGroupBox(self.tab)
self.groupBox.setObjectName("groupBox")
self.gridLayout_9 = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout_9.setObjectName("gridLayout_9")
self.groupBox_7 = QtWidgets.QGroupBox(self.groupBox)
self.groupBox_7.setObjectName("groupBox_7")
self.gridLayout_11 = QtWidgets.QGridLayout(self.groupBox_7)
self.gridLayout_11.setObjectName("gridLayout_11")
self.fed_s = QtWidgets.QCheckBox(self.groupBox_7)
self.fed_s.setObjectName("fed_s")
self.gridLayout_11.addWidget(self.fed_s, 0, 1, 1, 1)
self.jericho_l = QtWidgets.QCheckBox(self.groupBox_7)
self.jericho_l.setObjectName("jericho_l")
self.gridLayout_11.addWidget(self.jericho_l, 2, 2, 1, 1)
self.jericho_m = QtWidgets.QCheckBox(self.groupBox_7)
self.jericho_m.setObjectName("jericho_m")
self.gridLayout_11.addWidget(self.jericho_m, 1, 2, 1, 1)
self.fed_m = QtWidgets.QCheckBox(self.groupBox_7)
self.fed_m.setObjectName("fed_m")
self.gridLayout_11.addWidget(self.fed_m, 1, 1, 1, 1)
self.empire_l = QtWidgets.QCheckBox(self.groupBox_7)
self.empire_l.setObjectName("empire_l")
self.gridLayout_11.addWidget(self.empire_l, 2, 0, 1, 1)
self.fed_l = QtWidgets.QCheckBox(self.groupBox_7)
self.fed_l.setObjectName("fed_l")
self.gridLayout_11.addWidget(self.fed_l, 2, 1, 1, 1)
self.empire_s = QtWidgets.QCheckBox(self.groupBox_7)
self.empire_s.setObjectName("empire_s")
self.gridLayout_11.addWidget(self.empire_s, 0, 0, 1, 1)
self.empire_m = QtWidgets.QCheckBox(self.groupBox_7)
self.empire_m.setObjectName("empire_m")
self.gridLayout_11.addWidget(self.empire_m, 1, 0, 1, 1)
self.jericho_s = QtWidgets.QCheckBox(self.groupBox_7)
self.jericho_s.setObjectName("jericho_s")
self.gridLayout_11.addWidget(self.jericho_s, 0, 2, 1, 1)
self.gridLayout_9.addWidget(self.groupBox_7, 1, 0, 1, 2)
self.groupBox_8 = QtWidgets.QGroupBox(self.groupBox)
self.groupBox_8.setObjectName("groupBox_8")
self.gridLayout_12 = QtWidgets.QGridLayout(self.groupBox_8)
self.gridLayout_12.setObjectName("gridLayout_12")
self.empire_11 = QtWidgets.QCheckBox(self.groupBox_8)
self.empire_11.setObjectName("empire_11")
self.gridLayout_12.addWidget(self.empire_11, 1, 0, 1, 1)
self.empire_14 = QtWidgets.QCheckBox(self.groupBox_8)
self.empire_14.setObjectName("empire_14")
self.gridLayout_12.addWidget(self.empire_14, 2, 0, 1, 1)
self.fed_8 = QtWidgets.QCheckBox(self.groupBox_8)
self.fed_8.setObjectName("fed_8")
self.gridLayout_12.addWidget(self.fed_8, 0, 1, 1, 1)
self.jericho_8 = QtWidgets.QCheckBox(self.groupBox_8)
self.jericho_8.setObjectName("jericho_8")
self.gridLayout_12.addWidget(self.jericho_8, 0, 2, 1, 1)
self.fed_11 = QtWidgets.QCheckBox(self.groupBox_8)
self.fed_11.setObjectName("fed_11")
self.gridLayout_12.addWidget(self.fed_11, 1, 1, 1, 1)
self.jericho_14 = QtWidgets.QCheckBox(self.groupBox_8)
self.jericho_14.setObjectName("jericho_14")
self.gridLayout_12.addWidget(self.jericho_14, 2, 2, 1, 1)
self.empire_8 = QtWidgets.QCheckBox(self.groupBox_8)
self.empire_8.setObjectName("empire_8")
self.gridLayout_12.addWidget(self.empire_8, 0, 0, 1, 1)
self.fed_14 = QtWidgets.QCheckBox(self.groupBox_8)
self.fed_14.setObjectName("fed_14")
self.gridLayout_12.addWidget(self.fed_14, 2, 1, 1, 1)
self.jericho_11 = QtWidgets.QCheckBox(self.groupBox_8)
self.jericho_11.setObjectName("jericho_11")
self.gridLayout_12.addWidget(self.jericho_11, 1, 2, 1, 1)
self.gridLayout_9.addWidget(self.groupBox_8, 2, 0, 3, 2)
self.groupBox_9 = QtWidgets.QGroupBox(self.groupBox)
self.groupBox_9.setObjectName("groupBox_9")
self.gridLayout_13 = QtWidgets.QGridLayout(self.groupBox_9)
self.gridLayout_13.setObjectName("gridLayout_13")
self.spiral = QtWidgets.QCheckBox(self.groupBox_9)
self.spiral.setObjectName("spiral")
self.gridLayout_13.addWidget(self.spiral, 0, 0, 1, 1)
self.check_all = QtWidgets.QPushButton(self.groupBox_9)
self.check_all.setObjectName("check_all")
self.gridLayout_13.addWidget(self.check_all, 4, 0, 1, 1)
self.dart = QtWidgets.QCheckBox(self.groupBox_9)
self.dart.setObjectName("dart")
self.gridLayout_13.addWidget(self.dart, 3, 0, 1, 1)
self.ende = QtWidgets.QCheckBox(self.groupBox_9)
self.ende.setObjectName("ende")
self.gridLayout_13.addWidget(self.ende, 1, 0, 1, 1)
self.uncheck_all = QtWidgets.QPushButton(self.groupBox_9)
self.uncheck_all.setObjectName("uncheck_all")
self.gridLayout_13.addWidget(self.uncheck_all, 5, 0, 1, 1)
self.garg = QtWidgets.QCheckBox(self.groupBox_9)
self.garg.setObjectName("garg")
self.gridLayout_13.addWidget(self.garg, 2, 0, 1, 1)
self.gridLayout_9.addWidget(self.groupBox_9, 1, 2, 4, 1)
self.gridLayout_4.addWidget(self.groupBox, 0, 0, 1, 1)
self.groupBox_6 = QtWidgets.QGroupBox(self.tab)
self.groupBox_6.setObjectName("groupBox_6")
self.gridLayout_10 = QtWidgets.QGridLayout(self.groupBox_6)
self.gridLayout_10.setObjectName("gridLayout_10")
self.progress = QtWidgets.QTableWidget(self.groupBox_6)
self.progress.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.progress.setShowGrid(True)
self.progress.setGridStyle(QtCore.Qt.SolidLine)
self.progress.setObjectName("progress")
self.progress.setColumnCount(0)
self.progress.setRowCount(0)
self.progress.horizontalHeader().setVisible(False)
self.progress.horizontalHeader().setDefaultSectionSize(50)
self.progress.verticalHeader().setDefaultSectionSize(20)
self.gridLayout_10.addWidget(self.progress, 0, 0, 1, 1)
self.gridLayout_4.addWidget(self.groupBox_6, 0, 1, 1, 1)
self.tabWidget.addTab(self.tab, "")
self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1048, 21))
self.menubar.setObjectName("menubar")
self.menuOpen = QtWidgets.QMenu(self.menubar)
self.menuOpen.setObjectName("menuOpen")
MainWindow.setMenuBar(self.menubar)
self.actionOpen = QtWidgets.QAction(MainWindow)
self.actionOpen.setObjectName("actionOpen")
self.actionExit = QtWidgets.QAction(MainWindow)
self.actionExit.setObjectName("actionExit")
self.open_file = QtWidgets.QAction(MainWindow)
self.open_file.setObjectName("open_file")
self.exit_btn = QtWidgets.QAction(MainWindow)
self.exit_btn.setObjectName("exit_btn")
self.actionOne = QtWidgets.QAction(MainWindow)
self.actionOne.setObjectName("actionOne")
self.actionDsadas = QtWidgets.QAction(MainWindow)
self.actionDsadas.setObjectName("actionDsadas")
self.menuOpen.addAction(self.open_file)
self.menuOpen.addSeparator()
self.menuOpen.addAction(self.exit_btn)
self.menubar.addAction(self.menuOpen.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "read dem logs - by Tillo"))
self.team1.setSortingEnabled(False)
self.team2.setSortingEnabled(False)
self.label.setText(_translate("MainWindow", "Team 2"))
self.label_2.setText(_translate("MainWindow", "Team 1"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_log), _translate("MainWindow", "Log reader"))
self.table_stat.setSortingEnabled(False)
self.label_5.setText(_translate("MainWindow", "Player 3"))
self.label_6.setText(_translate("MainWindow", "Player 4"))
self.label_4.setText(_translate("MainWindow", "Player 2"))
self.label_3.setText(_translate("MainWindow", "Player 1"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_player), _translate("MainWindow", "Compare players"))
self.groupBox_3.setTitle(_translate("MainWindow", "Parts you have"))
self.groupBox_4.setTitle(_translate("MainWindow", "Materials you need"))
self.groupBox_2.setTitle(_translate("MainWindow", "Materials you have"))
self.groupBox_5.setTitle(_translate("MainWindow", "Parts you need"))
self.groupBox.setTitle(_translate("MainWindow", "Ships"))
self.groupBox_7.setTitle(_translate("MainWindow", "R15 Craftables"))
self.fed_s.setText(_translate("MainWindow", "Peregrine"))
self.jericho_l.setText(_translate("MainWindow", "Ronin"))
self.jericho_m.setText(_translate("MainWindow", "Saw One"))
self.fed_m.setText(_translate("MainWindow", "Jaguar"))
self.empire_l.setText(_translate("MainWindow", "Kraken"))
self.fed_l.setText(_translate("MainWindow", "Octopus"))
self.empire_s.setText(_translate("MainWindow", "Cyning"))
self.empire_m.setText(_translate("MainWindow", "Mjolnir"))
self.jericho_s.setText(_translate("MainWindow", "Caltrop"))
self.groupBox_8.setTitle(_translate("MainWindow", "Dessies"))
self.empire_11.setText(_translate("MainWindow", "Brave"))
self.empire_14.setText(_translate("MainWindow", "Vigilant"))
self.fed_8.setText(_translate("MainWindow", "Procyon"))
self.jericho_8.setText(_translate("MainWindow", "Archon"))
self.fed_11.setText(_translate("MainWindow", "Antares"))
self.jericho_14.setText(_translate("MainWindow", "Tyrant"))
self.empire_8.setText(_translate("MainWindow", "Invincible"))
self.fed_14.setText(_translate("MainWindow", "Sirius"))
self.jericho_11.setText(_translate("MainWindow", "Sibyl"))
self.groupBox_9.setTitle(_translate("MainWindow", "Random"))
self.spiral.setText(_translate("MainWindow", "Spiral"))
self.check_all.setText(_translate("MainWindow", "check all"))
self.dart.setText(_translate("MainWindow", "Dart"))
self.ende.setText(_translate("MainWindow", "Endeavour"))
self.uncheck_all.setText(_translate("MainWindow", "uncheck all"))
self.garg.setText(_translate("MainWindow", "Gargoyle"))
self.groupBox_6.setTitle(_translate("MainWindow", "Progress"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Ship parts"))
self.menuOpen.setTitle(_translate("MainWindow", "File"))
self.actionOpen.setText(_translate("MainWindow", "Open..."))
self.actionExit.setText(_translate("MainWindow", "Exit"))
self.open_file.setText(_translate("MainWindow", "Open"))
self.exit_btn.setText(_translate("MainWindow", "Exit"))
self.actionOne.setText(_translate("MainWindow", "one"))
self.actionDsadas.setText(_translate("MainWindow", "dsadas"))
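# Hedged usage sketch (added; not part of the generated file): the standard
# way to host a generated Ui_* class in a QMainWindow.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)   # builds all widgets onto the window
    window.show()
    sys.exit(app.exec_())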
|
MightyEnki/sc-thingie
|
kek.py
|
Python
|
lgpl-3.0
| 26,611
|
[
"Jaguar",
"Octopus"
] |
5ad8193f67788c6411a649863a6fd14b19dbe4f35adfbc05bdb821ac98108174
|
# Nexus.py - a NEXUS parser
#
# Copyright 2005 by Frank Kauff & Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Bug reports welcome: fkauff@duke.edu
#
"""Nexus class. Parse the contents of a nexus file.
Based upon 'NEXUS: An extensible file format for systematic information'
Maddison, Swofford, Maddison. 1997. Syst. Biol. 46(4):590-621
"""
import os, sys, math, random, copy
import sets
from copy import deepcopy
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData
from Bio.Seq import Seq
from Trees import Tree,NodeData
try:
import cnexus
except ImportError:
C=False
else:
C=True
INTERLEAVE=70
SPECIAL_COMMANDS=['charstatelabels','charlabels','taxlabels', 'taxset', 'charset','charpartition','taxpartition',\
'matrix','tree', 'utree','translate']
KNOWN_NEXUS_BLOCKS = ['trees','data', 'characters', 'taxa', 'sets']
PUNCTUATION='()[]{}/\,;:=*\'"`+-<>'
MRBAYESSAFE='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890_'
WHITESPACE=' \t\n'
#SPECIALCOMMENTS=['!','&','%','/','\\','@'] #original list of special comments
SPECIALCOMMENTS=['&'] # supported special comment ('tree' command), all others are ignored
CHARSET='chars'
TAXSET='taxa'
class NexusError(Exception): pass
class CharBuffer:
"""Helps reading NEXUS-words and characters from a buffer."""
def __init__(self,string):
if string:
self.buffer=list(string)
else:
self.buffer=[]
def peek(self):
if self.buffer:
return self.buffer[0]
else:
return None
def peek_nonwhitespace(self):
b=''.join(self.buffer).strip()
if b:
return b[0]
else:
return None
def next(self):
if self.buffer:
return self.buffer.pop(0)
else:
return None
def next_nonwhitespace(self):
while True:
p=self.next()
if p is None:
break
if p not in WHITESPACE:
return p
return None
def skip_whitespace(self):
while self.buffer[0] in WHITESPACE:
self.buffer=self.buffer[1:]
def next_until(self,target):
for t in target:
try:
pos=self.buffer.index(t)
except ValueError:
pass
else:
found=''.join(self.buffer[:pos])
self.buffer=self.buffer[pos:]
return found
else:
return None
def peek_word(self,word):
return ''.join(self.buffer[:len(word)])==word
def next_word(self):
"""Return the next NEXUS word from a string, dealing with single and double quotes,
whitespace and punctuation.
"""
word=[]
quoted=False
first=self.next_nonwhitespace() # get first character
if not first: # return empty if only whitespace left
return None
word.append(first)
if first=="'": # word starts with a quote
quoted=True
elif first in PUNCTUATION: # if it's punctuation, return immediately
return first
while True:
c=self.peek()
if c=="'": # a quote?
word.append(self.next()) # store quote
if self.peek()=="'": # double quote
skip=self.next() # skip second quote
elif quoted: # second single quote ends word
break
elif quoted:
word.append(self.next()) # if quoted, then add anything
elif not c or c in PUNCTUATION or c in WHITESPACE: # if not quoted and special character, stop
break
else:
word.append(self.next()) # standard character
return ''.join(word)
def rest(self):
"""Return the rest of the string without parsing."""
return ''.join(self.buffer)
class StepMatrix:
"""Calculate a stepmatrix for weighted parsimony.
See Wheeler (1990), Cladistics 6:269-275.
"""
def __init__(self,symbols,gap):
self.data={}
self.symbols=[s for s in symbols]
self.symbols.sort()
if gap:
self.symbols.append(gap)
for x in self.symbols:
for y in [s for s in self.symbols if s!=x]:
self.set(x,y,0)
def set(self,x,y,value):
if x>y:
x,y=y,x
self.data[x+y]=value
def add(self,x,y,value):
if x>y:
x,y=y,x
self.data[x+y]+=value
def sum(self):
        return sum(self.data.values())
def transformation(self):
total=self.sum()
if total!=0:
for k in self.data:
self.data[k]=self.data[k]/float(total)
return self
def weighting(self):
for k in self.data:
if self.data[k]!=0:
self.data[k]=-math.log(self.data[k])
return self
def smprint(self,name='your_name_here'):
matrix='usertype %s stepmatrix=%d\n' % (name,len(self.symbols))
matrix+=' %s\n' % ' '.join(self.symbols)
for x in self.symbols:
            matrix+=('[%s]' % x).ljust(8)
for y in self.symbols:
if x==y:
matrix+=' . '
else:
if x>y:
x1,y1=y,x
else:
x1,y1=x,y
if self.data[x1+y1]==0:
matrix+='inf. '
else:
                        matrix+=('%2.2f' % self.data[x1+y1]).ljust(10)
matrix+='\n'
matrix+=';\n'
return matrix
def safename(name,mrbayes=False):
"""Return a taxon identifier according to NEXUS standard.
Wrap quotes around names with punctuation or whitespace, and double single quotes.
mrbayes=True: write names without quotes, whitespace or punctuation for mrbayes.
"""
if mrbayes:
safe=name.replace(' ','_')
safe=''.join([c for c in safe if c in MRBAYESSAFE])
else:
safe=name.replace("'","''")
if sets.Set(safe).intersection(sets.Set(WHITESPACE+PUNCTUATION)):
safe="'"+safe+"'"
return safe
def quotestrip(word):
"""Remove quotes and/or double quotes around identifiers."""
if not word:
return None
while (word.startswith("'") and word.endswith("'")) or (word.startswith('"') and word.endswith('"')):
word=word[1:-1]
return word
def get_start_end(sequence, skiplist=['-','?']):
"""Return position of first and last character which is not in skiplist (defaults to ['-','?'])."""
length=len(sequence)
if length==0:
return None,None
end=length-1
while end>=0 and (sequence[end] in skiplist):
end-=1
start=0
while start<length and (sequence[start] in skiplist):
start+=1
if start==length and end==-1: # empty sequence
return -1,-1
else:
return start,end
def _sort_keys_by_values(p):
"""Returns a sorted list of keys of p sorted by values of p."""
startpos=[(p[pn],pn) for pn in p if p[pn]]
startpos.sort()
return zip(*startpos)[1]
def _make_unique(l):
"""Check that all values in list are unique and return a pruned and sorted list."""
l=list(sets.Set(l))
l.sort()
return l
def _seqmatrix2strmatrix(matrix):
"""Converts a Seq-object matrix to a plain sequence-string matrix."""
return dict([(t,matrix[t].tostring()) for t in matrix])
def _compact4nexus(orig_list):
"""Transform [1 2 3 5 6 7 8 12 15 18 20] (baseindex 0, used in the Nexus class)
    into '2-4 6-9 13-19\\3 21' (baseindex 1, used in programs like Paup or MrBayes).
"""
if not orig_list:
return ''
orig_list=list(sets.Set(orig_list))
orig_list.sort()
shortlist=[]
clist=orig_list[:]
clist.append(clist[-1]+.5) # dummy value makes it easier
while len(clist)>1:
step=1
for i,x in enumerate(clist):
if x==clist[0]+i*step: # are we still in the right step?
continue
elif i==1 and len(clist)>3 and clist[i+1]-x==x-clist[0]:
# second element, and possibly at least 3 elements to link,
# and the next one is in the right step
step=x-clist[0]
else: # pattern broke, add all values before current position to new list
sub=clist[:i]
if len(sub)==1:
shortlist.append(str(sub[0]+1))
else:
if step==1:
shortlist.append('%d-%d' % (sub[0]+1,sub[-1]+1))
else:
shortlist.append('%d-%d\\%d' % (sub[0]+1,sub[-1]+1,step))
clist=clist[i:]
break
return ' '.join(shortlist)
def combine(matrices):
"""Combine matrices in [(name,nexus-instance),...] and return new nexus instance.
    combined_matrix=combine([(name1,nexus_instance1),(name2,nexus_instance2),...])
Character sets, character partitions and taxon sets are prefixed, readjusted and present in
the combined matrix.
"""
if not matrices:
return None
name=matrices[0][0]
combined=copy.deepcopy(matrices[0][1]) # initiate with copy of first matrix
mixed_datatypes=(len(sets.Set([n[1].datatype for n in matrices]))>1)
if mixed_datatypes:
combined.datatype='None' # dealing with mixed matrices is application specific. You take care of that yourself!
# raise NexusError, 'Matrices must be of same datatype'
combined.charlabels=None
combined.statelabels=None
combined.interleave=False
combined.translate=None
# rename taxon sets and character sets and name them with prefix
for cn,cs in combined.charsets.items():
combined.charsets['%s.%s' % (name,cn)]=cs
del combined.charsets[cn]
for tn,ts in combined.taxsets.items():
combined.taxsets['%s.%s' % (name,tn)]=ts
del combined.taxsets[tn]
# previous partitions usually don't make much sense in combined matrix
# just initiate one new partition parted by single matrices
combined.charpartitions={'combined':{name:range(combined.nchar)}}
for n,m in matrices[1:]: # add all other matrices
both=[t for t in combined.taxlabels if t in m.taxlabels]
combined_only=[t for t in combined.taxlabels if t not in both]
m_only=[t for t in m.taxlabels if t not in both]
for t in both:
# concatenate sequences and unify gap and missing character symbols
combined.matrix[t]+=Seq(m.matrix[t].tostring().replace(m.gap,combined.gap).replace(m.missing,combined.missing),combined.alphabet)
# replace date of missing taxa with symbol for missing data
for t in combined_only:
combined.matrix[t]+=Seq(combined.missing*m.nchar,combined.alphabet)
for t in m_only:
combined.matrix[t]=Seq(combined.missing*combined.nchar,combined.alphabet)+\
Seq(m.matrix[t].tostring().replace(m.gap,combined.gap).replace(m.missing,combined.missing),combined.alphabet)
combined.taxlabels.extend(m_only) # new taxon list
for cn,cs in m.charsets.items(): # adjust character sets for new matrix
combined.charsets['%s.%s' % (n,cn)]=[x+combined.nchar for x in cs]
if m.taxsets:
if not combined.taxsets:
combined.taxsets={}
combined.taxsets.update(dict([('%s.%s' % (n,tn),ts) for tn,ts in m.taxsets.items()])) # update taxon sets
combined.charpartitions['combined'][n]=range(combined.nchar,combined.nchar+m.nchar) # update new charpartition
# update charlabels
if m.charlabels:
if not combined.charlabels:
combined.charlabels={}
combined.charlabels.update(dict([(combined.nchar+i,label) for (i,label) in m.charlabels.items()]))
combined.nchar+=m.nchar # update nchar and ntax
combined.ntax+=len(m_only)
return combined
def _kill_comments_and_break_lines(text):
"""Delete []-delimited comments out of a file and break into lines separated by ';'.
stripped_text=_kill_comments_and_break_lines(text):
Nested and multiline comments are allowed. [ and ] symbols within single
or double quotes are ignored, newline ends a quote, all symbols with quotes are
treated the same (thus not quoting inside comments like [this character ']' ends a comment])
Special [&...] and [\...] comments remain untouched, if not inside standard comment.
Quotes inside special [& and [\ are treated as normal characters,
but no nesting inside these special comments allowed (like [& [\ ]]).
';' ist deleted from end of line.
NOTE: this function is very slow for large files, and obsolete when using C extension cnexus
"""
contents=CharBuffer(text)
newtext=[]
newline=[]
quotelevel=''
speciallevel=False
commlevel=0
while True:
#plain=contents.next_until(["'",'"','[',']','\n',';']) # search for next special character
#if not plain:
# newline.append(contents.rest) # not found, just add the rest
# break
#newline.append(plain) # add intermediate text
t=contents.next() # and get special character
if t is None:
break
if t==quotelevel and not (commlevel or speciallevel): # matching quote ends quotation
quotelevel=''
elif not quotelevel and not (commlevel or speciallevel) and (t=='"' or t=="'"): # single or double quote starts quotation
quotelevel=t
elif not quotelevel and t=='[': # opening bracket outside a quote
if contents.peek() in SPECIALCOMMENTS and commlevel==0 and not speciallevel:
speciallevel=True
else:
commlevel+=1
        elif not quotelevel and t==']': # closing bracket outside a quote
if speciallevel:
speciallevel=False
else:
commlevel-=1
if commlevel<0:
raise NexusError, 'Nexus formatting error: unmatched ]'
continue
if commlevel==0: # copy if we're not in comment
if t==';' and not quotelevel:
newtext.append(''.join(newline))
newline=[]
else:
newline.append(t)
#level of comments should be 0 at the end of the file
if newline:
newtext.append('\n'.join(newline))
if commlevel>0:
raise NexusError, 'Nexus formatting error: unmatched ['
return newtext
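# Example (hedged sketch): comments are stripped and ';' splits command lines;
# leading/trailing whitespace is cleaned up later by _adjust_lines().
# >>> _kill_comments_and_break_lines('begin data [a comment]; dimensions ntax=2;')
# ['begin data ', ' dimensions ntax=2']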
def _adjust_lines(lines):
"""Adjust linebreaks to match ';', strip leading/trailing whitespace
list_of_commandlines=_adjust_lines(input_text)
Lines are adjusted so that no linebreaks occur within a commandline
(except matrix command line)
"""
formatted_lines=[]
for l in lines:
#Convert line endings
l=l.replace('\r\n','\n').replace('\r','\n').strip()
if l.lower().startswith('matrix'):
formatted_lines.append(l)
else:
l=l.replace('\n',' ')
if l:
formatted_lines.append(l)
return formatted_lines
def _replace_parenthesized_ambigs(seq,rev_ambig_values):
"""Replaces ambigs in xxx(ACG)xxx format by IUPAC ambiguity code."""
opening=seq.find('(')
while opening>-1:
closing=seq.find(')')
if closing<0:
raise NexusError, 'Missing closing parenthesis in: '+seq
elif closing<opening:
raise NexusError, 'Missing opening parenthesis in: '+seq
ambig=[x for x in seq[opening+1:closing]]
ambig.sort()
ambig=''.join(ambig)
ambig_code=rev_ambig_values[ambig.upper()]
if ambig!=ambig.upper():
ambig_code=ambig_code.lower()
seq=seq[:opening]+ambig_code+seq[closing+1:]
opening=seq.find('(')
return seq
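# Example (hedged sketch): the reverse-ambiguity dict below is a minimal
# stand-in for self.rev_ambiguous_values as built in Nexus._format().
# >>> _replace_parenthesized_ambigs('AT(AG)C', {'AG': 'R'})
# 'ATRC'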
class Commandline:
"""Represent a commandline as command and options."""
def __init__(self, line, title):
self.options={}
options=[]
self.command=None
try:
#Assume matrix (all other command lines have been stripped of \n)
self.command, options = line.strip().split('\n', 1)
except ValueError: #Not matrix
#self.command,options=line.split(' ',1) #no: could be tab or spaces (translate...)
self.command=line.split()[0]
options=' '.join(line.split()[1:])
self.command = self.command.strip().lower()
if self.command in SPECIAL_COMMANDS: # special command that need newlines and order of options preserved
self.options=options.strip()
else:
if len(options) > 0:
try:
options = options.replace('=', ' = ').split()
                    valued_indices=[(n-1,n,n+1) for n in range(len(options)) if options[n]=='=' and n!=0 and n!=len(options)-1]
indices = []
for sl in valued_indices:
indices.extend(sl)
token_indices = [n for n in range(len(options)) if n not in indices]
for opt in valued_indices:
#self.options[options[opt[0]].lower()] = options[opt[2]].lower()
self.options[options[opt[0]].lower()] = options[opt[2]]
for token in token_indices:
self.options[options[token].lower()] = None
except ValueError:
raise NexusError, 'Incorrect formatting in line: %s' % line
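# Usage sketch (hedged): tokenizing a plain (non-special) command line.
# >>> cl = Commandline('dimensions ntax=10 nchar=100', 'data')
# >>> cl.command, cl.options['ntax'], cl.options['nchar']
# ('dimensions', '10', '100')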
class Block:
"""Represent a NEXUS block with block name and list of commandlines ."""
def __init__(self,title=None):
self.title=title
self.commandlines=[]
class Nexus(object):
__slots__=['original_taxon_order','__dict__']
def __init__(self, input=None):
self.ntax=0 # number of taxa
self.nchar=0 # number of characters
self.taxlabels=[] # labels for taxa, ordered by their id
self.charlabels=None # ... and for characters
self.statelabels=None # ... and for states
self.datatype='dna' # (standard), dna, rna, nucleotide, protein
self.respectcase=False # case sensitivity
self.missing='?' # symbol for missing characters
self.gap='-' # symbol for gap
self.symbols=None # set of symbols
self.equate=None # set of symbol synonyms
self.matchchar=None # matching char for matrix representation
self.labels=None # left, right, no
self.transpose=False # whether matrix is transposed
self.interleave=False # whether matrix is interleaved
self.tokens=False # unsupported
self.eliminate=None # unsupported
self.matrix=None # ...
self.unknown_blocks=[] # blocks we don't care about
self.taxsets={}
self.charsets={}
self.charpartitions={}
self.taxpartitions={}
self.trees=[] # list of Trees (instances of tree class)
self.translate=None # Dict to translate taxon <-> taxon numbers
self.structured=[] # structured input representation
self.set={} # dict of the set command to set various options
self.options={} # dict of the options command in the data block
# some defaults
self.options['gapmode']='missing'
if input:
self.read(input)
def get_original_taxon_order(self):
"""Included for backwards compatibility."""
return self.taxlabels
def set_original_taxon_order(self,value):
"""Included for backwards compatibility."""
self.taxlabels=value
original_taxon_order=property(get_original_taxon_order,set_original_taxon_order)
def read(self,input):
"""Read and parse NEXUS imput (filename, file-handle, string."""
# 1. Assume we have the name of a file in the execution dir
# Note we need to add parsing of the path to dir/filename
try:
file_contents = open(os.path.expanduser(input),'rU').read()
self.filename=input
except (TypeError,IOError,AttributeError):
#2 Assume we have a string from a fh.read()
if isinstance(input, str):
file_contents = input
self.filename='input_string'
#3 Assume we have a file object
elif hasattr(input,'read'): # file objects or StringIO objects
file_contents=input.read()
if hasattr(input,"name") and input.name:
self.filename=input.name
else:
self.filename='Unknown_nexus_file'
else:
                print str(input)[:50]
                raise NexusError, 'Unrecognized input: %s ...' % str(input)[:100]
file_contents=file_contents.strip()
if file_contents.startswith('#NEXUS'):
file_contents=file_contents[6:]
if C:
decommented=cnexus.scanfile(file_contents)
#check for unmatched parentheses
if decommented=='[' or decommented==']':
raise NexusError, 'Unmatched %s' % decommented
# cnexus can't return lists, so in analogy we separate commandlines with chr(7)
            # (a character that shouldn't be part of a nexus file under normal circumstances)
commandlines=_adjust_lines(decommented.split(chr(7)))
else:
commandlines=_adjust_lines(_kill_comments_and_break_lines(file_contents))
# get rid of stupid 'NEXUS token'
try:
if commandlines[0][:6].upper()=='#NEXUS':
commandlines[0]=commandlines[0][6:].strip()
except:
pass
        # now loop through blocks (we parse only data in known blocks, thus ignoring non-block commands)
nexus_block_gen = self._get_nexus_block(commandlines)
while 1:
try:
title, contents = nexus_block_gen.next()
except StopIteration:
break
if title in KNOWN_NEXUS_BLOCKS:
self._parse_nexus_block(title, contents)
else:
self._unknown_nexus_block(title, contents)
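    # Usage sketch (hedged): the three accepted input flavours; the filename
    # 'alignment.nex' is hypothetical.
    # >>> nex = Nexus()
    # >>> nex.read('alignment.nex')            # filename
    # >>> nex.read(open('alignment.nex'))      # file handle
    # >>> nex.read('#NEXUS\nbegin data; ...')  # raw string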
def _get_nexus_block(self,file_contents):
"""Generator for looping through Nexus blocks."""
inblock=False
blocklines=[]
while file_contents:
cl=file_contents.pop(0)
if cl.lower().startswith('begin'):
if not inblock:
inblock=True
title=cl.split()[1].lower()
else:
raise NexusError('Illegal block nesting in block %s' % title)
elif cl.lower().startswith('end'):
if inblock:
inblock=False
yield title,blocklines
blocklines=[]
else:
raise NexusError('Unmatched \'end\'.')
elif inblock:
blocklines.append(cl)
def _unknown_nexus_block(self,title, contents):
block = Block()
block.commandlines.append(contents)
block.title = title
self.unknown_blocks.append(block)
def _parse_nexus_block(self,title, contents):
"""Parse a known Nexus Block """
        # attach the structured block representation
self._apply_block_structure(title, contents)
#now check for taxa,characters,data blocks. If this stuff is defined more than once
        #the later occurrences will override the previous ones.
block=self.structured[-1]
for line in block.commandlines:
try:
getattr(self,'_'+line.command)(line.options)
            except AttributeError:
                raise NexusError, 'Unknown command: %s ' % line.command
def _dimensions(self,options):
if options.has_key('ntax'):
self.ntax=eval(options['ntax'])
if options.has_key('nchar'):
self.nchar=eval(options['nchar'])
def _format(self,options):
# print options
# we first need to test respectcase, then symbols (which depends on respectcase)
# then datatype (which, if standard, depends on symbols and respectcase in order to generate
# dicts for ambiguous values and alphabet
if options.has_key('respectcase'):
self.respectcase=True
        # adjust symbols for respectcase
if options.has_key('symbols'):
self.symbols=options['symbols']
if (self.symbols.startswith('"') and self.symbols.endswith('"')) or\
            (self.symbols.startswith("'") and self.symbols.endswith("'")):
self.symbols=self.symbols[1:-1].replace(' ','')
if not self.respectcase:
self.symbols=self.symbols.lower()+self.symbols.upper()
self.symbols=list(sets.Set(self.symbols))
if options.has_key('datatype'):
self.datatype=options['datatype'].lower()
if self.datatype=='dna' or self.datatype=='nucleotide':
self.alphabet=deepcopy(IUPAC.ambiguous_dna)
self.ambiguous_values=deepcopy(IUPACData.ambiguous_dna_values)
self.unambiguous_letters=deepcopy(IUPACData.unambiguous_dna_letters)
elif self.datatype=='rna':
self.alphabet=deepcopy(IUPAC.ambiguous_rna)
self.ambiguous_values=deepcopy(IUPACData.ambiguous_rna_values)
self.unambiguous_letters=deepcopy(IUPACData.unambiguous_rna_letters)
elif self.datatype=='protein':
self.alphabet=deepcopy(IUPAC.protein)
self.ambiguous_values={'B':'DN','Z':'EQ','X':deepcopy(IUPACData.protein_letters)} # that's how PAUP handles it
self.unambiguous_letters=deepcopy(IUPACData.protein_letters)+'*' # stop-codon
elif self.datatype=='standard':
raise NexusError('Datatype standard is not yet supported.')
#self.alphabet=None
#self.ambiguous_values={}
#if not self.symbols:
# self.symbols='01' # if nothing else defined, then 0 and 1 are the default states
#self.unambiguous_letters=self.symbols
else:
raise NexusError, 'Unsupported datatype: '+self.datatype
self.valid_characters=''.join(self.ambiguous_values.keys())+self.unambiguous_letters
if not self.respectcase:
self.valid_characters=self.valid_characters.lower()+self.valid_characters.upper()
#we have to sort the reverse ambig coding dict key characters:
#to be sure that it's 'ACGT':'N' and not 'GTCA':'N'
rev=dict([(i[1],i[0]) for i in self.ambiguous_values.items() if i[0]!='X'])
self.rev_ambiguous_values={}
for (k,v) in rev.items():
key=[c for c in k]
key.sort()
self.rev_ambiguous_values[''.join(key)]=v
            #overwrite symbols for datatype rna,dna,nucleotide
if self.datatype in ['dna','rna','nucleotide']:
self.symbols=self.alphabet.letters
if self.missing not in self.ambiguous_values:
self.ambiguous_values[self.missing]=self.unambiguous_letters+self.gap
self.ambiguous_values[self.gap]=self.gap
elif self.datatype=='standard':
if not self.symbols:
self.symbols=['1','0']
if options.has_key('missing'):
self.missing=options['missing'][0]
if options.has_key('gap'):
self.gap=options['gap'][0]
if options.has_key('equate'):
self.equate=options['equate']
if options.has_key('matchchar'):
self.matchchar=options['matchchar'][0]
if options.has_key('labels'):
self.labels=options['labels']
        if options.has_key('transpose'):
            # self.transpose=True is never reached: TRANSPOSE is unsupported
            raise NexusError, 'TRANSPOSE is not supported!'
if options.has_key('interleave'):
if options['interleave']==None or options['interleave'].lower()=='yes':
self.interleave=True
if options.has_key('tokens'):
self.tokens=True
if options.has_key('notokens'):
self.tokens=False
    def _set(self,options):
        self.set=options
    def _options(self,options):
        self.options=options
def _eliminate(self,options):
self.eliminate=options
def _taxlabels(self,options):
"""Get taxon labels."""
self.taxlabels=[]
opts=CharBuffer(options)
while True:
taxon=quotestrip(opts.next_word())
if not taxon:
break
self.taxlabels.append(taxon)
def _check_taxlabels(self,taxon):
"""Check for presence of taxon in self.taxlabels."""
# According to NEXUS standard, underscores shall be treated as spaces...,
# so checking for identity is more difficult
nextaxa=dict([(t.replace(' ','_'),t) for t in self.taxlabels])
nexid=taxon.replace(' ','_')
return nextaxa.get(nexid)
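    # Example (hedged sketch): underscores and spaces compare equal, so both
    # spellings resolve to the stored label.
    # >>> nex.taxlabels = ['Homo sapiens']
    # >>> nex._check_taxlabels('Homo_sapiens')
    # 'Homo sapiens'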
def _charlabels(self,options):
self.charlabels={}
opts=CharBuffer(options)
while True:
try:
# get id and state
w=opts.next_word()
                if w is None: # MacClade saves and reads charlabel-lists with terminal comma?!
break
identifier=self._resolve(w,set_type=CHARSET)
state=quotestrip(opts.next_word())
self.charlabels[identifier]=state
# check for comma or end of command
c=opts.next_nonwhitespace()
if c is None:
break
elif c!=',':
raise NexusError,'Missing \',\' in line %s.' % options
except NexusError:
raise
except:
raise NexusError,'Format error in line %s.' % options
def _charstatelabels(self,options):
# warning: charstatelabels supports only charlabels-syntax!
self._charlabels(options)
def _statelabels(self,options):
#self.charlabels=options
#print 'Command statelabels is not supported and will be ignored.'
pass
def _matrix(self,options):
if not self.ntax or not self.nchar:
raise NexusError,'Dimensions must be specified before matrix!'
taxlabels_present=(self.taxlabels!=[])
self.matrix={}
taxcount=0
block_interleave=0
#eliminate empty lines and leading/trailing whitespace
        lines=[l.strip() for l in options.split('\n') if l.strip()!='']
lineiter=iter(lines)
while 1:
try:
l=lineiter.next()
except StopIteration:
if taxcount<self.ntax:
raise NexusError, 'Not enough taxa in matrix.'
elif taxcount>self.ntax:
raise NexusError, 'Too many taxa in matrix.'
else:
break
# count the taxa and check for interleaved matrix
taxcount+=1
##print taxcount
if taxcount>self.ntax:
if not self.interleave:
raise NexusError, 'Too many taxa in matrix - should matrix be interleaved?'
else:
taxcount=1
block_interleave=1
#get taxon name and sequence
linechars=CharBuffer(l)
id=quotestrip(linechars.next_word())
l=linechars.rest().strip()
if taxlabels_present and not self._check_taxlabels(id):
raise NexusError,'Taxon '+id+' not found in taxlabels.'
chars=''
if self.interleave:
#interleaved matrix
#print 'In interleave'
if l:
chars=''.join(l.split())
else:
chars=''.join(lineiter.next().split())
else:
#non-interleaved matrix
chars=''.join(l.split())
while len(chars)<self.nchar:
l=lineiter.next()
chars+=''.join(l.split())
iupac_seq=Seq(_replace_parenthesized_ambigs(chars,self.rev_ambiguous_values),self.alphabet)
            #first taxon has the reference sequence if matchchar is used
if taxcount==1:
refseq=iupac_seq
else:
if self.matchchar:
while 1:
p=iupac_seq.tostring().find(self.matchchar)
if p==-1:
break
iupac_seq=Seq(iupac_seq.tostring()[:p]+refseq[p]+iupac_seq.tostring()[p+1:],self.alphabet)
#check for invalid characters
for i,c in enumerate(iupac_seq.tostring()):
if c not in self.valid_characters and c!=self.gap and c!=self.missing:
raise NexusError, 'Taxon %s: Illegal character %s in line: %s (check dimensions / interleaving)'\
% (id,c,l[i-10:i+10])
#add sequence to matrix
if block_interleave==0:
while self.matrix.has_key(id):
if id.split('.')[-1].startswith('copy'):
id='.'.join(id.split('.')[:-1])+'.copy'+str(eval('0'+id.split('.')[-1][4:])+1)
else:
id+='.copy'
#raise NexusError, id+' already in matrix!\nError in: '+l
self.matrix[id]=iupac_seq
                # add taxon name only if taxlabels is not already present
if not taxlabels_present:
self.taxlabels.append(id)
else:
taxon_present=self._check_taxlabels(id)
if taxon_present:
self.matrix[taxon_present]+=iupac_seq
else:
raise NexusError, 'Taxon %s not in first block of interleaved matrix.' % id
#check all sequences for length according to nchar
for taxon in self.matrix:
if len(self.matrix[taxon])!=self.nchar:
raise NexusError,'Nchar ('+str(self.nchar)+') does not match data for taxon '+taxon
def _translate(self,options):
self.translate={}
opts=CharBuffer(options)
while True:
try:
# get id and state
identifier=int(opts.next_word())
label=quotestrip(opts.next_word())
self.translate[identifier]=label
# check for comma or end of command
c=opts.next_nonwhitespace()
if c is None:
break
elif c!=',':
raise NexusError,'Missing \',\' in line %s.' % options
except NexusError:
raise
except:
raise NexusError,'Format error in line %s.' % options
def _utree(self,options):
"""Some software (clustalx) uses 'utree' to denote an unrooted tree."""
self._tree(options)
def _tree(self,options):
opts=CharBuffer(options)
name=opts.next_word()
if opts.next_nonwhitespace()!='=':
raise NexusError,'Syntax error in tree description: %s' % options[:50]
rooted=False
weight=1.0
while opts.peek_nonwhitespace()=='[':
open=opts.next_nonwhitespace()
symbol=opts.next()
if symbol!='&':
raise NexusError,'Illegal special comment [%s...] in tree description: %s' % (symbol, options[:50])
special=opts.next()
value=opts.next_until(']')
closing=opts.next()
if special=='R':
rooted=True
elif special=='U':
rooted=False
elif special=='W':
weight=float(value)
tree=Tree(name=name,weight=weight,rooted=rooted,tree=opts.rest().strip())
# if there's an active translation table, translate
if self.translate:
for n in tree.get_terminals():
try:
tree.node(n).data.taxon=safename(self.translate[int(tree.node(n).data.taxon)])
except (ValueError,KeyError):
                    raise NexusError,'Unable to substitute %s using \'translate\' data.' % tree.node(n).data.taxon
self.trees.append(tree)
def _apply_block_structure(self,title,lines):
block=Block('')
block.title = title
for line in lines:
block.commandlines.append(Commandline(line, title))
self.structured.append(block)
def _taxset(self, options):
name,taxa=self._get_indices(options,set_type=TAXSET)
self.taxsets[name]=_make_unique(taxa)
def _charset(self, options):
name,sites=self._get_indices(options,set_type=CHARSET)
self.charsets[name]=_make_unique(sites)
def _taxpartition(self, options):
taxpartition={}
quotelevel=False
opts=CharBuffer(options)
name=self._name_n_vector(opts)
if not name:
raise NexusError, 'Formatting error in taxpartition: %s ' % options
        # now collect the subpartitions and parse them
        # subpartitions are separated by commas - which unfortunately could be part of a quoted identifier...
        # this is rather inelegant, but we have to avoid double-parsing and potential change of special nexus-words
sub=''
while True:
w=opts.next()
if w is None or (w==',' and not quotelevel):
subname,subindices=self._get_indices(sub,set_type=TAXSET,separator=':')
taxpartition[subname]=_make_unique(subindices)
sub=''
if w is None:
break
else:
if w=="'":
quotelevel=not quotelevel
sub+=w
self.taxpartitions[name]=taxpartition
def _charpartition(self, options):
charpartition={}
quotelevel=False
opts=CharBuffer(options)
name=self._name_n_vector(opts)
if not name:
raise NexusError, 'Formatting error in charpartition: %s ' % options
        # now collect the subpartitions and parse them
        # subpartitions are separated by commas - which unfortunately could be part of a quoted identifier...
sub=''
while True:
w=opts.next()
if w is None or (w==',' and not quotelevel):
subname,subindices=self._get_indices(sub,set_type=CHARSET,separator=':')
charpartition[subname]=_make_unique(subindices)
sub=''
if w is None:
break
else:
if w=="'":
quotelevel=not quotelevel
sub+=w
self.charpartitions[name]=charpartition
def _get_indices(self,options,set_type=CHARSET,separator='='):
"""Parse the taxset/charset specification
'1 2 3 - 5 dog cat 10- 20 \\ 3' --> [0,1,2,3,4,'dog','cat',10,13,16,19]
"""
opts=CharBuffer(options)
name=self._name_n_vector(opts,separator=separator)
indices=self._parse_list(opts,set_type=set_type)
if indices is None:
raise NexusError, 'Formatting error in line: %s ' % options
return name,indices
def _name_n_vector(self,opts,separator='='):
"""Extract name and check that it's not in vector format."""
rest=opts.rest()
name=opts.next_word()
if not name:
raise NexusError, 'Formatting error in line: %s ' % rest
name=quotestrip(name)
        if opts.peek_nonwhitespace()=='(':
            open=opts.next_nonwhitespace()
            qualifier=opts.next_word()
close=opts.next_nonwhitespace()
            if qualifier.lower()=='vector':
                raise NexusError, 'Unsupported VECTOR format in line %s' % rest
            elif qualifier.lower()!='standard':
                raise NexusError, 'Unknown qualifier %s in line %s' % (qualifier,rest)
if opts.next_nonwhitespace()!=separator:
raise NexusError, 'Formatting error in line: %s ' % rest
return name
def _parse_list(self,options_buffer,set_type):
"""Parse a NEXUS list: [1, 2, 4-8\\2, dog, cat] --> [1,2,4,6,8,17-21],
(assuming dog is taxon no. 17 and cat is taxon no. 21).
"""
plain_list=[]
if options_buffer.peek_nonwhitespace():
            try: # capture all possible exceptions and treat them as formatting errors, if they are not NexusError
while True:
identifier=options_buffer.next_word() # next list element
if not identifier: # end of list?
break
start=self._resolve(identifier,set_type=set_type)
                    if options_buffer.peek_nonwhitespace()=='-': # followed by '-'
end=start
step=1
# get hyphen and end of range
hyphen=options_buffer.next_nonwhitespace()
end=self._resolve(options_buffer.next_word(),set_type=set_type)
if set_type==CHARSET:
                            if options_buffer.peek_nonwhitespace()=='\\': # followed by '\'
backslash=options_buffer.next_nonwhitespace()
step=int(options_buffer.next_word()) # get backslash and step
plain_list.extend(range(start,end+1,step))
else:
if type(start)==list or type(end)==list:
                                raise NexusError, 'Names of character sets are not allowed in range definition: %s' % identifier
start=self.taxlabels.index(start)
end=self.taxlabels.index(end)
taxrange=self.taxlabels[start:end+1]
plain_list.extend(taxrange)
else:
if type(start)==list: # start was the name of charset or taxset
plain_list.extend(start)
else: # start was an ordinary identifier
plain_list.append(start)
except NexusError:
raise
except:
return None
return plain_list
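    # Example (hedged sketch), matching the docstring above (0-based output):
    # >>> nex._parse_list(CharBuffer('1 2 4-8\\2'), set_type=CHARSET)
    # [0, 1, 3, 5, 7]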
def _resolve(self,identifier,set_type=None):
"""Translate identifier in list into character/taxon index.
Characters (which are referred to by their index in Nexus.py):
Plain numbers are returned minus 1 (Nexus indices to python indices)
            Text identifiers are translated into their indices (if plain character identifiers),
the first hit in charlabels is returned (charlabels don't need to be unique)
or the range of indices is returned (if names of character sets).
Taxa (which are referred to by their unique name in Nexus.py):
            Plain numbers are translated into their taxon name, underscores and spaces are considered equal.
            Names are returned unchanged (if plain taxon identifiers), or the names in
            the corresponding taxon set are returned
"""
identifier=quotestrip(identifier)
if not set_type:
raise NexusError('INTERNAL ERROR: Need type to resolve identifier.')
if set_type==CHARSET:
try:
n=int(identifier)
except ValueError:
if self.charlabels and identifier in self.charlabels.values():
for k in self.charlabels:
if self.charlabels[k]==identifier:
return k
elif self.charsets and identifier in self.charsets:
return self.charsets[identifier]
else:
raise NexusError, 'Unknown character identifier: %s' % identifier
else:
if n<=self.nchar:
return n-1
else:
                    raise NexusError, 'Illegal character identifier: %d>nchar (=%d).' % (n,self.nchar)
elif set_type==TAXSET:
try:
n=int(identifier)
except ValueError:
taxlabels_id=self._check_taxlabels(identifier)
if taxlabels_id:
return taxlabels_id
elif self.taxsets and identifier in self.taxsets:
return self.taxsets[identifier]
else:
raise NexusError, 'Unknown taxon identifier: %s' % identifier
else:
if n>0 and n<=self.ntax:
return self.taxlabels[n-1]
else:
                    raise NexusError, 'Illegal taxon identifier: %d>ntax (=%d).' % (n,self.ntax)
else:
raise NexusError('Unknown set specification: %s.'% set_type)
def _stateset(self, options):
#Not implemented
pass
def _changeset(self, options):
#Not implemented
pass
def _treeset(self, options):
#Not implemented
pass
def _treepartition(self, options):
#Not implemented
pass
def write_nexus_data_partitions(self, matrix=None, filename=None, blocksize=None, interleave=False,
exclude=[], delete=[], charpartition=None, comment='',mrbayes=False):
"""Writes a nexus file for each partition in charpartition.
Only non-excluded characters and non-deleted taxa are included, just the data block is written.
"""
if not matrix:
matrix=self.matrix
if not matrix:
return
if not filename:
filename=self.filename
if charpartition:
pfilenames={}
for p in charpartition:
total_exclude=[]+exclude
total_exclude.extend([c for c in range(self.nchar) if c not in charpartition[p]])
total_exclude=_make_unique(total_exclude)
pcomment=comment+'\nPartition: '+p+'\n'
dot=filename.rfind('.')
if dot>0:
pfilename=filename[:dot]+'_'+p+'.data'
else:
pfilename=filename+'_'+p
pfilenames[p]=pfilename
self.write_nexus_data(filename=pfilename,matrix=matrix,blocksize=blocksize,
interleave=interleave,exclude=total_exclude,delete=delete,comment=pcomment,append_sets=False,
mrbayes=mrbayes)
return pfilenames
else:
fn=self.filename+'.data'
self.write_nexus_data(filename=fn,matrix=matrix,blocksize=blocksize,interleave=interleave,
exclude=exclude,delete=delete,comment=comment,append_sets=False,
mrbayes=mrbayes)
return fn
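    # Usage sketch (hedged): one data file per partition; the partition name
    # 'genes' and the returned filenames are hypothetical.
    # >>> files = nex.write_nexus_data_partitions(charpartition=nex.charpartitions['genes'])
    # >>> files
    # {'gene1': 'alignment_gene1.data', 'gene2': 'alignment_gene2.data'}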
def write_nexus_data(self, filename=None, matrix=None, exclude=[], delete=[],\
blocksize=None, interleave=False, interleave_by_partition=False,\
comment=None,omit_NEXUS=False,append_sets=True,mrbayes=False):
""" Writes a nexus file with data and sets block. Character sets and partitions
are appended by default, and are adjusted according
to excluded characters (i.e. character sets still point to the same sites (not necessarily same positions),
without including the deleted characters.
"""
if not matrix:
matrix=self.matrix
if not matrix:
return
if not filename:
filename=self.filename
if [t for t in delete if not self._check_taxlabels(t)]:
raise NexusError, 'Unknown taxa: %s' % ', '.join(sets.Set(delete).difference(sets.Set(self.taxlabels)))
if interleave_by_partition:
if not interleave_by_partition in self.charpartitions:
raise NexusError, 'Unknown partition: '+interleave_by_partition
else:
partition=self.charpartitions[interleave_by_partition]
# we need to sort the partition names by starting position before we exclude characters
names=_sort_keys_by_values(partition)
newpartition={}
for p in partition:
newpartition[p]=[c for c in partition[p] if c not in exclude]
# how many taxa and how many characters are left?
        undelete=[taxon for taxon in self.taxlabels if taxon in matrix and taxon not in delete]
        if not undelete or (undelete and undelete[0]==''):
            return
        cropped_matrix=_seqmatrix2strmatrix(self.crop_matrix(matrix,exclude=exclude,delete=delete))
        ntax_adjusted=len(undelete)
        nchar_adjusted=len(cropped_matrix[undelete[0]])
if isinstance(filename,str):
try:
fh=open(filename,'w')
except IOError:
raise NexusError, 'Could not open %s for writing.' % filename
elif isinstance(filename,file):
fh=filename
if not omit_NEXUS:
fh.write('#NEXUS\n')
if comment:
fh.write('['+comment+']\n')
fh.write('begin data;\n')
fh.write('\tdimensions ntax=%d nchar=%d;\n' % (ntax_adjusted, nchar_adjusted))
fh.write('\tformat datatype='+self.datatype)
if self.respectcase:
fh.write(' respectcase')
if self.missing:
fh.write(' missing='+self.missing)
if self.gap:
fh.write(' gap='+self.gap)
if self.matchchar:
fh.write(' matchchar='+self.matchchar)
if self.labels:
fh.write(' labels='+self.labels)
if self.equate:
fh.write(' equate='+self.equate)
if interleave or interleave_by_partition:
fh.write(' interleave')
fh.write(';\n')
#if self.taxlabels:
# fh.write('taxlabels '+' '.join(self.taxlabels)+';\n')
if self.charlabels:
newcharlabels=self._adjust_charlabels(exclude=exclude)
clkeys=newcharlabels.keys()
clkeys.sort()
fh.write('charlabels '+', '.join(["%s %s" % (k+1,safename(newcharlabels[k])) for k in clkeys])+';\n')
fh.write('matrix\n')
if not blocksize:
if interleave:
blocksize=70
else:
blocksize=self.nchar
        # delete deleted taxa and exclude excluded characters...
namelength=max([len(safename(t,mrbayes=mrbayes)) for t in undelete])
if interleave_by_partition:
# interleave by partitions, but adjust partitions with regard to excluded characters
seek=0
for p in names:
fh.write('[%s: %s]\n' % (interleave_by_partition,p))
if len(newpartition[p])>0:
for taxon in undelete:
fh.write(safename(taxon,mrbayes=mrbayes).ljust(namelength+1))
fh.write(cropped_matrix[taxon][seek:seek+len(newpartition[p])]+'\n')
fh.write('\n')
else:
fh.write('[empty]\n\n')
seek+=len(newpartition[p])
elif interleave:
for seek in range(0,nchar_adjusted,blocksize):
for taxon in undelete:
fh.write(safename(taxon,mrbayes=mrbayes).ljust(namelength+1))
fh.write(cropped_matrix[taxon][seek:seek+blocksize]+'\n')
fh.write('\n')
else:
for taxon in undelete:
if blocksize<nchar_adjusted:
fh.write(safename(taxon,mrbayes=mrbayes)+'\n')
else:
fh.write(safename(taxon,mrbayes=mrbayes).ljust(namelength+1))
for seek in range(0,nchar_adjusted,blocksize):
fh.write(cropped_matrix[taxon][seek:seek+blocksize]+'\n')
fh.write(';\nend;\n')
if append_sets:
fh.write(self.append_sets(exclude=exclude,delete=delete,mrbayes=mrbayes))
fh.close()
return filename
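    # Usage sketch (hedged): write a cropped copy; sets are appended and
    # re-indexed so they still point at the kept characters. The charset
    # name 'coding' and taxon 'outgroup' are hypothetical.
    # >>> nex.write_nexus_data(filename='cropped.nex',
    # ...                      exclude=nex.invert(nex.charsets['coding']),
    # ...                      delete=['outgroup'], interleave=True)
    # 'cropped.nex'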
def append_sets(self,exclude=[],delete=[],mrbayes=False):
"""Appends a sets block to <filename>."""
if not self.charsets and not self.taxsets and not self.charpartitions:
return ''
sets=['\nbegin sets']
# - now if characters have been excluded, the character sets need to be adjusted,
# so that they still point to the right character positions
# calculate a list of offsets: for each deleted character, the following character position
# in the new file will have an additional offset of -1
offset=0
offlist=[]
for c in range(self.nchar):
if c in exclude:
offset+=1
offlist.append(-1) # dummy value as these character positions are excluded
else:
offlist.append(c-offset)
# now adjust each of the character sets
for n,ns in self.charsets.items():
cset=[offlist[c] for c in ns if c not in exclude]
if cset:
sets.append('charset %s = %s' % (safename(n),_compact4nexus(cset)))
for n,s in self.taxsets.items():
tset=[safename(t,mrbayes=mrbayes) for t in s if t not in delete]
if tset:
sets.append('taxset %s = %s' % (safename(n),' '.join(tset)))
for n,p in self.charpartitions.items():
# as characters have been excluded, the partitions must be adjusted
# if a partition is empty, it will be omitted from the charpartition command
# (although paup allows charpartition part=t1:,t2:,t3:1-100)
names=_sort_keys_by_values(p)
newpartition={}
for sn in names:
nsp=[offlist[c] for c in p[sn] if c not in exclude]
if nsp:
newpartition[sn]=nsp
if newpartition:
sets.append('charpartition %s = %s' % (safename(n),\
', '.join(['%s: %s' % (sn,_compact4nexus(newpartition[sn])) for sn in names if sn in newpartition])))
        # now write taxpartitions, much easier than charpartitions
for n,p in self.taxpartitions.items():
names=_sort_keys_by_values(p)
newpartition={}
for sn in names:
nsp=[t for t in p[sn] if t not in delete]
if nsp:
newpartition[sn]=nsp
if newpartition:
sets.append('taxpartition %s = %s' % (safename(n),\
', '.join(['%s: %s' % (safename(sn),' '.join(map(safename,newpartition[sn]))) for sn in names if sn in newpartition])))
# add 'end' and return everything
sets.append('end;\n')
return ';\n'.join(sets)
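    # Example (hedged sketch) of the returned string (leading newline omitted):
    # begin sets;
    # charset coding = 1-300;
    # taxset outgroup = taxon1 taxon2;
    # end;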
def export_fasta(self, filename=None, width=70):
"""Writes matrix into a fasta file: (self, filename=None, width=70)."""
if not filename:
            if '.' in self.filename and self.filename.split('.')[-1].lower() in ['paup','nexus','nex','dat']:
filename='.'.join(self.filename.split('.')[:-1])+'.fas'
else:
filename=self.filename+'.fas'
fh=open(filename,'w')
for taxon in self.taxlabels:
fh.write('>'+safename(taxon)+'\n')
for i in range(0, len(self.matrix[taxon].tostring()), width):
fh.write(self.matrix[taxon].tostring()[i:i+width] + '\n')
fh.close()
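    # Usage sketch (hedged): dump the matrix next to the nexus file, wrapping
    # sequence lines at 60 columns.
    # >>> nex.export_fasta(width=60)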
def constant(self,matrix=None,delete=[],exclude=[]):
"""Return a list with all constant characters."""
if not matrix:
matrix=self.matrix
undelete=[t for t in self.taxlabels if t in matrix and t not in delete]
if not undelete:
return None
elif len(undelete)==1:
return [x for x in range(len(matrix[undelete[0]])) if x not in exclude]
# get the first sequence and expand all ambiguous values
constant=[(x,self.ambiguous_values.get(n.upper(),n.upper())) for
x,n in enumerate(matrix[undelete[0]].tostring()) if x not in exclude]
for taxon in undelete[1:]:
newconstant=[]
for site in constant:
#print '%d (paup=%d)' % (site[0],site[0]+1),
seqsite=matrix[taxon][site[0]].upper()
#print seqsite,'checked against',site[1],'\t',
if seqsite==self.missing or (seqsite==self.gap and self.options['gapmode'].lower()=='missing') or seqsite==site[1]:
# missing or same as before -> ok
newconstant.append(site)
elif seqsite in site[1] or site[1]==self.missing or (self.options['gapmode'].lower()=='missing' and site[1]==self.gap):
# subset of an ambig or only missing in previous -> take subset
newconstant.append((site[0],self.ambiguous_values.get(seqsite,seqsite)))
elif seqsite in self.ambiguous_values: # is it an ambig: check the intersection with prev. values
intersect=sets.Set(self.ambiguous_values[seqsite]).intersection(sets.Set(site[1]))
if intersect:
newconstant.append((site[0],''.join(intersect)))
# print 'ok'
#else:
# print 'failed'
#else:
# print 'failed'
constant=newconstant
return constant
# return [x[0] for x in constant]
def cstatus(self,site,delete=[],narrow=True):
"""Summarize character.
narrow=True: paup-mode (a c ? --> ac; ? ? ? --> ?)
        narrow=False: (a c ? --> a c g t -; ? ? ? --> a c g t -)
"""
undelete=[t for t in self.taxlabels if t not in delete]
if not undelete:
return None
cstatus=[]
for t in undelete:
c=self.matrix[t][site].upper()
if self.options.get('gapmode')=='missing' and c==self.gap:
c=self.missing
if narrow and c==self.missing:
if c not in cstatus:
cstatus.append(c)
else:
cstatus.extend([b for b in self.ambiguous_values[c] if b not in cstatus])
if self.missing in cstatus and narrow and len(cstatus)>1:
cstatus=[c for c in cstatus if c!=self.missing]
cstatus.sort()
return cstatus
def weighted_stepmatrix(self,name='your_name_here',exclude=[],delete=[]):
"""Calculates a stepmatrix for weighted parsimony.
See Wheeler (1990), Cladistics 6:269-275 and
Felsenstein (1981), Biol. J. Linn. Soc. 16:183-196
"""
m=StepMatrix(self.unambiguous_letters,self.gap)
for site in [s for s in range(self.nchar) if s not in exclude]:
cstatus=self.cstatus(site,delete)
for i,b1 in enumerate(cstatus[:-1]):
for b2 in cstatus[i+1:]:
m.add(b1.upper(),b2.upper(),1)
return m.transformation().weighting().smprint(name=name)
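    # Usage sketch (hedged): a PAUP-style stepmatrix weighted by the state
    # pairs observed across all included sites.
    # >>> print nex.weighted_stepmatrix(name='my_weights')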
def crop_matrix(self,matrix=None, delete=[], exclude=[]):
"""Return a matrix without deleted taxa and excluded characters."""
if not matrix:
matrix=self.matrix
if [t for t in delete if not self._check_taxlabels(t)]:
            raise NexusError, 'Unknown taxa: %s' % ', '.join(sets.Set(delete).difference(self.taxlabels))
if exclude!=[]:
undelete=[t for t in self.taxlabels if t in matrix and t not in delete]
if not undelete:
return {}
m=[matrix[k].tostring() for k in undelete]
zipped_m=zip(*m)
sitesm=[s for i,s in enumerate(zipped_m) if i not in exclude]
if sitesm==[]:
return dict([(t,Seq('',self.alphabet)) for t in undelete])
else:
zipped_sitesm=zip(*sitesm)
m=[Seq(s,self.alphabet) for s in map(''.join,zipped_sitesm)]
return dict(zip(undelete,m))
else:
return dict([(t,matrix[t]) for t in self.taxlabels if t in matrix and t not in delete])
def bootstrap(self,matrix=None,delete=[],exclude=[]):
"""Return a bootstrapped matrix."""
if not matrix:
matrix=self.matrix
seqobjects=isinstance(matrix[matrix.keys()[0]],Seq) # remember if Seq objects
cm=self.crop_matrix(delete=delete,exclude=exclude) # crop data out
if not cm: # everything deleted?
return {}
elif len(cm[cm.keys()[0]])==0: # everything excluded?
return cm
undelete=[t for t in self.taxlabels if t in cm]
if seqobjects:
sitesm=zip(*[cm[t].tostring() for t in undelete])
alphabet=matrix[matrix.keys()[0]].alphabet
else:
sitesm=zip(*[cm[t] for t in undelete])
bootstrapsitesm=[sitesm[random.randint(0,len(sitesm)-1)] for i in range(len(sitesm))]
bootstrapseqs=map(''.join,zip(*bootstrapsitesm))
if seqobjects:
bootstrapseqs=[Seq(s,alphabet) for s in bootstrapseqs]
return dict(zip(undelete,bootstrapseqs))
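    # Usage sketch (hedged): one bootstrap pseudoreplicate; columns are drawn
    # with replacement and taxa keep their names.
    # >>> pseudo = nex.bootstrap()
    # >>> nex.write_nexus_data(filename='boot1.nex', matrix=pseudo)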
def add_sequence(self,name,sequence):
"""Adds a sequence to the matrix."""
if not name:
raise NexusError, 'New sequence must have a name'
diff=self.nchar-len(sequence)
if diff<0:
self.insert_gap(self.nchar,-diff)
elif diff>0:
sequence+=self.missing*diff
self.matrix[name]=Seq(sequence,self.alphabet)
self.ntax+=1
self.taxlabels.append(name)
#taxlabels?
def insert_gap(self,pos,n=1,leftgreedy=False):
"""Add a gap into the matrix and adjust charsets and partitions.
pos=0: first position
pos=nchar: last position
"""
def _adjust(set,x,d,leftgreedy=False):
"""Adjusts chartacter sets if gaps are inserted, taking care of
new gaps within a coherent character set."""
# if 3 gaps are inserted at pos. 9 in a set that looks like 1 2 3 8 9 10 11 13 14 15
# then the adjusted set will be 1 2 3 8 9 10 11 12 13 14 15 16 17 18
# but inserting into position 8 it will stay like 1 2 3 11 12 13 14 15 16 17 18
set.sort()
addpos=0
for i,c in enumerate(set):
if c>=x:
set[i]=c+d
# if we add gaps within a group of characters, we want the gap position included in this group
if c==x:
if leftgreedy or (i>0 and set[i-1]==c-1):
addpos=i
if addpos>0:
set[addpos:addpos]=range(x,x+d)
return set
if pos<0 or pos>self.nchar:
raise NexusError('Illegal gap position: %d' % pos)
if n==0:
return
sitesm=zip(*[self.matrix[t].tostring() for t in self.taxlabels])
sitesm[pos:pos]=[['-']*len(self.taxlabels)]*n
# #self.matrix=dict([(taxon,Seq(map(''.join,zip(*sitesm))[i],self.alphabet)) for\
# i,taxon in enumerate(self.taxlabels)])
zipped=zip(*sitesm)
mapped=map(''.join,zipped)
listed=[(taxon,Seq(mapped[i],self.alphabet)) for i,taxon in enumerate(self.taxlabels)]
self.matrix=dict(listed)
self.nchar+=n
# now adjust character sets
for i,s in self.charsets.items():
self.charsets[i]=_adjust(s,pos,n,leftgreedy=leftgreedy)
for p in self.charpartitions:
for sp,s in self.charpartitions[p].items():
self.charpartitions[p][sp]=_adjust(s,pos,n,leftgreedy=leftgreedy)
# now adjust character state labels
self.charlabels=self._adjust_charlabels(insert=[pos]*n)
return self.charlabels
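    # Example (hedged sketch): two gap columns inserted at position 3 extend a
    # character set that spans the insertion point.
    # >>> nex.charsets['cs1'] = [0, 1, 2, 3, 4]
    # >>> nex.insert_gap(3, 2)          # returns adjusted charlabels (may be None)
    # >>> nex.charsets['cs1']
    # [0, 1, 2, 3, 4, 5, 6]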
def _adjust_charlabels(self,exclude=None,insert=None):
"""Return adjusted indices of self.charlabels if characters are excluded or inserted."""
if exclude and insert:
raise NexusError, 'Can\'t exclude and insert at the same time'
if not self.charlabels:
return None
labels=self.charlabels.keys()
labels.sort()
newcharlabels={}
if exclude:
exclude.sort()
exclude.append(sys.maxint)
excount=0
for c in labels:
if not c in exclude:
while c>exclude[excount]:
excount+=1
newcharlabels[c-excount]=self.charlabels[c]
elif insert:
insert.sort()
insert.append(sys.maxint)
icount=0
for c in labels:
while c>=insert[icount]:
icount+=1
newcharlabels[c+icount]=self.charlabels[c]
else:
return self.charlabels
return newcharlabels
def invert(self,charlist):
"""Returns all character indices that are not in charlist."""
return [c for c in range(self.nchar) if c not in charlist]
def gaponly(self,include_missing=False):
"""Return gap-only sites."""
gap=sets.Set(self.gap)
if include_missing:
gap.add(self.missing)
sitesm=zip(*[self.matrix[t].tostring() for t in self.taxlabels])
gaponly=[i for i,site in enumerate(sitesm) if sets.Set(site).issubset(gap)]
return gaponly
def terminal_gap_to_missing(self,missing=None,skip_n=True):
"""Replaces all terminal gaps with missing character.
Mixtures like ???------??------- are properly resolved."""
if not missing:
missing=self.missing
replace=[self.missing,self.gap]
if not skip_n:
replace.extend(['n','N'])
for taxon in self.taxlabels:
sequence=self.matrix[taxon].tostring()
length=len(sequence)
start,end=get_start_end(sequence,skiplist=replace)
if start==-1 and end==-1:
sequence=missing*length
else:
sequence=sequence[:end+1]+missing*(length-end-1)
sequence=start*missing+sequence[start:]
            assert length==len(sequence), 'Illegal sequence manipulation in Nexus.terminal_gap_to_missing in taxon %s' % taxon
self.matrix[taxon]=Seq(sequence,self.alphabet)
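    # Example (hedged sketch): terminal gap runs become missing symbols while
    # internal gaps survive, e.g. '--AC-GT--' -> '??AC-GT??'.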
| dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/Nexus/Nexus.py | Python | apache-2.0 | 68,776 | ["Biopython"] | 1d4bc1a88edf0e62d379a37ba5d0b5aa04ea13a70c029c6b1074092298b048dd |